xref: /freebsd/sys/dev/bce/if_bce.c (revision 531c890b8aecbf157fe3491503b5ca62c0b01093)
1 /*-
2  * Copyright (c) 2006-2008 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5706S A2, A3
38  *   BCM5708C B1, B2
39  *   BCM5708S B1, B2
40  *
41  * The following controllers are not supported by this driver:
42  *   BCM5706C A0, A1 (pre-production)
43  *   BCM5706S A0, A1 (pre-production)
44  *   BCM5708C A0, B0 (pre-production)
45  *   BCM5708S A0, B0 (pre-production)
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Debug Options                                                        */
55 /****************************************************************************/
56 #ifdef BCE_DEBUG
57 	u32 bce_debug = BCE_WARN;
58 
59 	/*          0 = Never              */
60 	/*          1 = 1 in 2,147,483,648 */
61 	/*        256 = 1 in     8,388,608 */
62 	/*       2048 = 1 in     1,048,576 */
63 	/*      65536 = 1 in        32,768 */
64 	/*    1048576 = 1 in         2,048 */
65 	/*  268435456 = 1 in             8 */
66 	/*  536870912 = 1 in             4 */
67 	/* 1073741824 = 1 in             2 */
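
	/* Worked example (derived from the table above): a value of 2048   */
	/* causes the corresponding simulated failure roughly once in every */
	/* 1,048,576 checks, i.e. with probability value / 2^31.            */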
68 
69 	/* Controls how often the l2_fhdr frame error check will fail. */
70 	int bce_debug_l2fhdr_status_check = 0;
71 
72 	/* Controls how often the unexpected attention check will fail. */
73 	int bce_debug_unexpected_attention = 0;
74 
75 	/* Controls how often to simulate an mbuf allocation failure. */
76 	int bce_debug_mbuf_allocation_failure = 0;
77 
78 	/* Controls how often to simulate a DMA mapping failure. */
79 	int bce_debug_dma_map_addr_failure = 0;
80 
81 	/* Controls how often to simulate a bootcode failure. */
82 	int bce_debug_bootcode_running_failure = 0;
83 #endif
84 
85 
86 /****************************************************************************/
87 /* PCI Device ID Table                                                      */
88 /*                                                                          */
89 /* Used by bce_probe() to identify the devices supported by this driver.    */
90 /****************************************************************************/
91 #define BCE_DEVDESC_MAX		64
92 
93 static struct bce_type bce_devs[] = {
94 	/* BCM5706C Controllers and OEM boards. */
95 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
96 		"HP NC370T Multifunction Gigabit Server Adapter" },
97 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
98 		"HP NC370i Multifunction Gigabit Server Adapter" },
99 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
100 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
101 
102 	/* BCM5706S controllers and OEM boards. */
103 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
104 		"HP NC370F Multifunction Gigabit Server Adapter" },
105 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
106 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
107 
108 	/* BCM5708C controllers and OEM boards. */
109 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
110 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
111 
112 	/* BCM5708S controllers and OEM boards. */
113 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
114 		"Broadcom NetXtreme II BCM5708 1000Base-SX" },
115 	{ 0, 0, 0, 0, NULL }
116 };
117 
118 
119 /****************************************************************************/
120 /* Supported Flash NVRAM device data.                                       */
121 /****************************************************************************/
122 static struct flash_spec flash_table[] =
123 {
124 	/* Slow EEPROM */
125 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
126 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 	 "EEPROM - slow"},
129 	/* Expansion entry 0001 */
130 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
131 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 	 "Entry 0001"},
134 	/* Saifun SA25F010 (non-buffered flash) */
135 	/* strap, cfg1, & write1 need updates */
136 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
137 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
139 	 "Non-buffered flash (128kB)"},
140 	/* Saifun SA25F020 (non-buffered flash) */
141 	/* strap, cfg1, & write1 need updates */
142 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
143 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
145 	 "Non-buffered flash (256kB)"},
146 	/* Expansion entry 0100 */
147 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
148 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 	 "Entry 0100"},
151 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
152 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
153 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
154 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
155 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
156 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
157 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
158 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
160 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
161 	/* Saifun SA25F005 (non-buffered flash) */
162 	/* strap, cfg1, & write1 need updates */
163 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
164 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
166 	 "Non-buffered flash (64kB)"},
167 	/* Fast EEPROM */
168 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
169 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 	 "EEPROM - fast"},
172 	/* Expansion entry 1001 */
173 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
174 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 	 "Entry 1001"},
177 	/* Expansion entry 1010 */
178 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
179 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 	 "Entry 1010"},
182 	/* ATMEL AT45DB011B (buffered flash) */
183 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
184 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
185 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
186 	 "Buffered flash (128kB)"},
187 	/* Expansion entry 1100 */
188 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
189 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
190 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 	 "Entry 1100"},
192 	/* Expansion entry 1101 */
193 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 	 "Entry 1101"},
197 	/* Atmel Expansion entry 1110 */
198 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
199 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
200 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
201 	 "Entry 1110 (Atmel)"},
202 	/* ATMEL AT45DB021B (buffered flash) */
203 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
204 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
206 	 "Buffered flash (256kB)"},
207 };
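
/*
 * How the table above is used (a summary, inferred from the NVRAM routines
 * declared below): the first word of each entry is the flash strapping value
 * that bce_init_nvram() matches against the controller's NVRAM configuration,
 * the next four words program the flash interface (config1-config3 and the
 * write command), and the remaining fields give the buffered-flash flag, the
 * page geometry, the byte address mask, and the total device size used by
 * the NVRAM read/write code.
 */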
208 
209 
210 /****************************************************************************/
211 /* FreeBSD device entry points.                                             */
212 /****************************************************************************/
213 static int  bce_probe				(device_t);
214 static int  bce_attach				(device_t);
215 static int  bce_detach				(device_t);
216 static int  bce_shutdown			(device_t);
217 
218 
219 /****************************************************************************/
220 /* BCE Debug Data Structure Dump Routines                                   */
221 /****************************************************************************/
222 #ifdef BCE_DEBUG
223 static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
224 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
225 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
226 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
227 static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
228 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
229 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
230 static void bce_dump_pgbd			(struct bce_softc *, int, struct rx_bd *);
231 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
232 static void bce_dump_ctx			(struct bce_softc *, u16);
233 static void bce_dump_ftqs			(struct bce_softc *);
234 static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
235 static void bce_dump_rx_chain		(struct bce_softc *, u16, int);
236 static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_dump_bc_state		(struct bce_softc *);
242 static void bce_breakpoint			(struct bce_softc *);
243 #endif
244 
245 
246 /****************************************************************************/
247 /* BCE Register/Memory Access Routines                                      */
248 /****************************************************************************/
249 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
250 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
251 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
252 static int  bce_miibus_read_reg		(device_t, int, int);
253 static int  bce_miibus_write_reg	(device_t, int, int, int);
254 static void bce_miibus_statchg		(device_t);
255 
256 
257 /****************************************************************************/
258 /* BCE NVRAM Access Routines                                                */
259 /****************************************************************************/
260 static int  bce_acquire_nvram_lock	(struct bce_softc *);
261 static int  bce_release_nvram_lock	(struct bce_softc *);
262 static void bce_enable_nvram_access	(struct bce_softc *);
263 static void	bce_disable_nvram_access(struct bce_softc *);
264 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
265 static int  bce_init_nvram			(struct bce_softc *);
266 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
267 static int  bce_nvram_test			(struct bce_softc *);
268 #ifdef BCE_NVRAM_WRITE_SUPPORT
269 static int  bce_enable_nvram_write	(struct bce_softc *);
270 static void bce_disable_nvram_write	(struct bce_softc *);
271 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
272 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
273 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
274 #endif
275 
276 /****************************************************************************/
277 /*                                                                          */
278 /****************************************************************************/
279 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
280 static int  bce_dma_alloc			(device_t);
281 static void bce_dma_free			(struct bce_softc *);
282 static void bce_release_resources	(struct bce_softc *);
283 
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync				(struct bce_softc *, u32);
288 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus			(struct bce_softc *);
291 
292 static void bce_stop				(struct bce_softc *);
293 static int  bce_reset				(struct bce_softc *, u32);
294 static int  bce_chipinit 			(struct bce_softc *);
295 static int  bce_blockinit 			(struct bce_softc *);
296 static int  bce_get_rx_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297 static int  bce_get_pg_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *);
298 
299 static int  bce_init_tx_chain		(struct bce_softc *);
300 static void bce_free_tx_chain		(struct bce_softc *);
301 
302 static int  bce_init_rx_chain		(struct bce_softc *);
303 static void bce_fill_rx_chain		(struct bce_softc *);
304 static void bce_free_rx_chain		(struct bce_softc *);
305 
306 static int  bce_init_pg_chain		(struct bce_softc *);
307 static void bce_fill_pg_chain		(struct bce_softc *);
308 static void bce_free_pg_chain		(struct bce_softc *);
309 
310 static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
311 static void bce_start_locked		(struct ifnet *);
312 static void bce_start				(struct ifnet *);
313 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
314 static void bce_watchdog			(struct bce_softc *);
315 static int  bce_ifmedia_upd			(struct ifnet *);
316 static void bce_ifmedia_upd_locked	(struct ifnet *);
317 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
318 static void bce_init_locked			(struct bce_softc *);
319 static void bce_init				(void *);
320 static void bce_mgmt_init_locked	(struct bce_softc *sc);
321 
322 static void bce_init_ctx			(struct bce_softc *);
323 static void bce_get_mac_addr		(struct bce_softc *);
324 static void bce_set_mac_addr		(struct bce_softc *);
325 static void bce_phy_intr			(struct bce_softc *);
326 static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
327 static void bce_rx_intr				(struct bce_softc *);
328 static void bce_tx_intr				(struct bce_softc *);
329 static void bce_disable_intr		(struct bce_softc *);
330 static void bce_enable_intr			(struct bce_softc *);
331 
332 #ifdef DEVICE_POLLING
333 static void bce_poll_locked			(struct ifnet *, enum poll_cmd, int);
334 static void bce_poll				(struct ifnet *, enum poll_cmd, int);
335 #endif
336 static void bce_intr				(void *);
337 static void bce_set_rx_mode			(struct bce_softc *);
338 static void bce_stats_update		(struct bce_softc *);
339 static void bce_tick				(void *);
340 static void bce_pulse				(void *);
341 static void bce_add_sysctls			(struct bce_softc *);
342 
343 
344 /****************************************************************************/
345 /* FreeBSD device dispatch table.                                           */
346 /****************************************************************************/
347 static device_method_t bce_methods[] = {
348 	/* Device interface (device_if.h) */
349 	DEVMETHOD(device_probe,		bce_probe),
350 	DEVMETHOD(device_attach,	bce_attach),
351 	DEVMETHOD(device_detach,	bce_detach),
352 	DEVMETHOD(device_shutdown,	bce_shutdown),
353 /* Supported by device interface but not used here. */
354 /*	DEVMETHOD(device_identify,	bce_identify),      */
355 /*	DEVMETHOD(device_suspend,	bce_suspend),       */
356 /*	DEVMETHOD(device_resume,	bce_resume),        */
357 /*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
358 
359 	/* Bus interface (bus_if.h) */
360 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
361 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
362 
363 	/* MII interface (miibus_if.h) */
364 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
365 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
366 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
367 /* Supported by MII interface but not used here.       */
368 /*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
369 /*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
370 
371 	{ 0, 0 }
372 };
373 
374 static driver_t bce_driver = {
375 	"bce",
376 	bce_methods,
377 	sizeof(struct bce_softc)
378 };
379 
380 static devclass_t bce_devclass;
381 
382 MODULE_DEPEND(bce, pci, 1, 1, 1);
383 MODULE_DEPEND(bce, ether, 1, 1, 1);
384 MODULE_DEPEND(bce, miibus, 1, 1, 1);
385 
386 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
387 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
388 
389 
390 /****************************************************************************/
391 /* Tunable device values                                                    */
392 /****************************************************************************/
393 static int bce_tso_enable = TRUE;
394 static int bce_msi_enable = 1;
395 
396 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
397 
398 /* Allowable values are TRUE or FALSE */
399 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
400 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
401 "TSO Enable/Disable");
402 
403 /* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
404 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
405 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
406 "MSI | INTx selector");
407 
408 /* ToDo: Add tunable to enable/disable strict MTU handling. */
409 /* Currently allows "loose" RX MTU checking (i.e. sets the  */
410 /* h/w RX MTU to the size of the largest receive buffer, or */
411 /* 2048 bytes).                                             */
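
/* Example (standard FreeBSD tunable handling, not specific to this        */
/* driver): the read-only tunables above can be set at boot time from      */
/* /boot/loader.conf, e.g. hw.bce.tso_enable=0 or hw.bce.msi_enable=0      */
/* to fall back to INTx interrupts.                                        */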
412 
413 /****************************************************************************/
414 /* Device probe function.                                                   */
415 /*                                                                          */
416 /* Compares the device to the driver's list of supported devices and        */
417 /* reports back to the OS whether this is the right driver for the device.  */
418 /*                                                                          */
419 /* Returns:                                                                 */
420 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
421 /****************************************************************************/
422 static int
423 bce_probe(device_t dev)
424 {
425 	struct bce_type *t;
426 	struct bce_softc *sc;
427 	char *descbuf;
428 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
429 
430 	t = bce_devs;
431 
432 	sc = device_get_softc(dev);
433 	bzero(sc, sizeof(struct bce_softc));
434 	sc->bce_unit = device_get_unit(dev);
435 	sc->bce_dev = dev;
436 
437 	/* Get the data for the device to be probed. */
438 	vid  = pci_get_vendor(dev);
439 	did  = pci_get_device(dev);
440 	svid = pci_get_subvendor(dev);
441 	sdid = pci_get_subdevice(dev);
442 
443 	DBPRINT(sc, BCE_VERBOSE_LOAD,
444 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
445 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
446 
447 	/* Look through the list of known devices for a match. */
448 	while(t->bce_name != NULL) {
449 
450 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
451 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
452 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
453 
454 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
455 
456 			if (descbuf == NULL)
457 				return(ENOMEM);
458 
459 			/* Print out the device identity. */
460 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
461 				t->bce_name,
462 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
463 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
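			/* For example, a PCI revision ID of 0x11 is reported    */
			/* above as "B1": (0x1 + 'A') = 'B' for the major        */
			/* stepping and 0x1 for the minor revision.              */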
464 
465 			device_set_desc_copy(dev, descbuf);
466 			free(descbuf, M_TEMP);
467 			return(BUS_PROBE_DEFAULT);
468 		}
469 		t++;
470 	}
471 
472 	return(ENXIO);
473 }
474 
475 
476 /****************************************************************************/
477 /* Device attach function.                                                  */
478 /*                                                                          */
479 /* Allocates device resources, performs secondary chip identification,      */
480 /* resets and initializes the hardware, and initializes driver instance     */
481 /* variables.                                                               */
482 /*                                                                          */
483 /* Returns:                                                                 */
484 /*   0 on success, positive value on failure.                               */
485 /****************************************************************************/
486 static int
487 bce_attach(device_t dev)
488 {
489 	struct bce_softc *sc;
490 	struct ifnet *ifp;
491 	u32 val;
492 	int count, rid, rc = 0;
493 
494 	sc = device_get_softc(dev);
495 	sc->bce_dev = dev;
496 
497 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
498 
499 	sc->bce_unit = device_get_unit(dev);
500 
501 	/* Set initial device and PHY flags */
502 	sc->bce_flags = 0;
503 	sc->bce_phy_flags = 0;
504 
505 	pci_enable_busmaster(dev);
506 
507 	/* Allocate PCI memory resources. */
508 	rid = PCIR_BAR(0);
509 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
510 		&rid, RF_ACTIVE | PCI_RF_DENSE);
511 
512 	if (sc->bce_res_mem == NULL) {
513 		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
514 			__FILE__, __LINE__);
515 		rc = ENXIO;
516 		goto bce_attach_fail;
517 	}
518 
519 	/* Get various resource handles. */
520 	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
521 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
522 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
523 
524 	/* If MSI is enabled in the driver, get the vector count. */
525 	count = bce_msi_enable ? pci_msi_count(dev) : 0;
526 
527 	/* Allocate PCI IRQ resources. */
528 	if (count == 1 && pci_alloc_msi(dev, &count) == 0 && count == 1) {
529 		rid = 1;
530 		sc->bce_flags |= BCE_USING_MSI_FLAG;
531 		DBPRINT(sc, BCE_VERBOSE_LOAD,
532 			"Allocating %d MSI interrupt(s)\n", count);
533 	} else {
534 		rid = 0;
535 		DBPRINT(sc, BCE_VERBOSE_LOAD, "Allocating IRQ interrupt\n");
536 	}
537 
538 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
539 	    RF_SHAREABLE | RF_ACTIVE);
540 
541 	if (sc->bce_res_irq == NULL) {
542 		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
543 			__FILE__, __LINE__);
544 		rc = ENXIO;
545 		goto bce_attach_fail;
546 	}
547 
548 	/* Initialize mutex for the current device instance. */
549 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
550 
551 	/*
552 	 * Configure byte swap and enable indirect register access.
553 	 * Rely on CPU to do target byte swapping on big endian systems.
554 	 * Accesses to registers outside of PCI configuration space are not
555 	 * valid until this is done.
556 	 */
557 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
558 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
559 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
560 
561 	/* Save ASIC revision info. */
562 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
563 
564 	/* Weed out any non-production controller revisions. */
565 	switch(BCE_CHIP_ID(sc)) {
566 		case BCE_CHIP_ID_5706_A0:
567 		case BCE_CHIP_ID_5706_A1:
568 		case BCE_CHIP_ID_5708_A0:
569 		case BCE_CHIP_ID_5708_B0:
570 			BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
571 				__FILE__, __LINE__,
572 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
573 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
574 			rc = ENODEV;
575 			goto bce_attach_fail;
576 	}
577 
578 	/*
579 	 * The embedded PCIe to PCI-X bridge (EPB)
580 	 * in the 5708 cannot address memory above
581 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
582 	 */
583 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
584 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
585 	else
586 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
587 
588 	/*
589 	 * Find the base address for shared memory access.
590 	 * Newer versions of bootcode use a signature and offset
591 	 * while older versions use a fixed address.
592 	 */
593 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
594 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
595 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
596 	else
597 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
598 
599 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
600 		__FUNCTION__, sc->bce_shmem_base);
601 
602 	/* Fetch the bootcode revision. */
603 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
604 		BCE_DEV_INFO_BC_REV);
605 
606 	/* Check if any management firmware is running. */
607 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
608 	if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED))
609 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
610 
611 	/* Get PCI bus information (speed and type). */
612 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
613 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
614 		u32 clkreg;
615 
616 		sc->bce_flags |= BCE_PCIX_FLAG;
617 
618 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
619 
620 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
621 		switch (clkreg) {
622 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
623 			sc->bus_speed_mhz = 133;
624 			break;
625 
626 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
627 			sc->bus_speed_mhz = 100;
628 			break;
629 
630 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
631 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
632 			sc->bus_speed_mhz = 66;
633 			break;
634 
635 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
636 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
637 			sc->bus_speed_mhz = 50;
638 			break;
639 
640 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
641 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
642 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
643 			sc->bus_speed_mhz = 33;
644 			break;
645 		}
646 	} else {
647 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
648 			sc->bus_speed_mhz = 66;
649 		else
650 			sc->bus_speed_mhz = 33;
651 	}
652 
653 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
654 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
655 
656 	/* Reset the controller and announce to the bootcode that the driver is present. */
657 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
658 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
659 			__FILE__, __LINE__);
660 		rc = ENXIO;
661 		goto bce_attach_fail;
662 	}
663 
664 	/* Initialize the controller. */
665 	if (bce_chipinit(sc)) {
666 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
667 			__FILE__, __LINE__);
668 		rc = ENXIO;
669 		goto bce_attach_fail;
670 	}
671 
672 	/* Perform NVRAM test. */
673 	if (bce_nvram_test(sc)) {
674 		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
675 			__FILE__, __LINE__);
676 		rc = ENXIO;
677 		goto bce_attach_fail;
678 	}
679 
680 	/* Fetch the permanent Ethernet MAC address. */
681 	bce_get_mac_addr(sc);
682 
683 	/*
684 	 * Trip points control how many BDs
685 	 * should be ready before generating an
686 	 * interrupt while ticks control how long
687 	 * a BD can sit in the chain before
688 	 * generating an interrupt.  Set the default
689 	 * values for the RX and TX chains.
690 	 */
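	/*
	 * Illustration (assuming the tick values below are in the
	 * controller's microsecond-based time units): with the non-debug
	 * RX defaults of bce_rx_quick_cons_trip = 6 and bce_rx_ticks = 18,
	 * an RX interrupt is generated after 6 receive BDs complete or
	 * after a completed BD has waited about 18 ticks, whichever
	 * happens first.
	 */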
691 
692 #ifdef BCE_DEBUG
693 	/* Force more frequent interrupts. */
694 	sc->bce_tx_quick_cons_trip_int = 1;
695 	sc->bce_tx_quick_cons_trip     = 1;
696 	sc->bce_tx_ticks_int           = 0;
697 	sc->bce_tx_ticks               = 0;
698 
699 	sc->bce_rx_quick_cons_trip_int = 1;
700 	sc->bce_rx_quick_cons_trip     = 1;
701 	sc->bce_rx_ticks_int           = 0;
702 	sc->bce_rx_ticks               = 0;
703 #else
704 	/* Improve throughput at the expense of increased latency. */
705 	sc->bce_tx_quick_cons_trip_int = 20;
706 	sc->bce_tx_quick_cons_trip     = 20;
707 	sc->bce_tx_ticks_int           = 80;
708 	sc->bce_tx_ticks               = 80;
709 
710 	sc->bce_rx_quick_cons_trip_int = 6;
711 	sc->bce_rx_quick_cons_trip     = 6;
712 	sc->bce_rx_ticks_int           = 18;
713 	sc->bce_rx_ticks               = 18;
714 #endif
715 
716 	/* Update statistics once every second. */
717 	sc->bce_stats_ticks = 1000000 & 0xffff00;
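	/* Arithmetic note: 1,000,000 & 0xffff00 = 999,936, so statistics   */
	/* are updated roughly once per second (assuming the value is       */
	/* expressed in microseconds).                                       */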
718 
719 	/*
720 	 * The SerDes based NetXtreme II controllers
721 	 * that support 2.5Gb operation (currently
722 	 * 5708S) use a PHY at address 2, otherwise
723 	 * the PHY is present at address 1.
724 	 */
725 	sc->bce_phy_addr = 1;
726 
727 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
728 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
729 		sc->bce_flags |= BCE_NO_WOL_FLAG;
730 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
731 			sc->bce_phy_addr = 2;
732 			val = REG_RD_IND(sc, sc->bce_shmem_base +
733 					 BCE_SHARED_HW_CFG_CONFIG);
734 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
735 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
736 				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
737 			}
738 		}
739 	}
740 
741 	/* Store data needed by PHY driver for backplane applications */
742 	sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
743 		BCE_SHARED_HW_CFG_CONFIG);
744 	sc->bce_port_hw_cfg   = REG_RD_IND(sc, sc->bce_shmem_base +
745 		BCE_SHARED_HW_CFG_CONFIG);
746 
747 	/* Allocate DMA memory resources. */
748 	if (bce_dma_alloc(dev)) {
749 		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
750 		    __FILE__, __LINE__);
751 		rc = ENXIO;
752 		goto bce_attach_fail;
753 	}
754 
755 	/* Allocate an ifnet structure. */
756 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
757 	if (ifp == NULL) {
758 		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
759 			__FILE__, __LINE__);
760 		rc = ENXIO;
761 		goto bce_attach_fail;
762 	}
763 
764 	/* Initialize the ifnet interface. */
765 	ifp->if_softc        = sc;
766 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
767 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
768 	ifp->if_ioctl        = bce_ioctl;
769 	ifp->if_start        = bce_start;
770 	ifp->if_init         = bce_init;
771 	ifp->if_mtu          = ETHERMTU;
772 
773 	if (bce_tso_enable) {
774 		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
775 		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
776 	} else {
777 		ifp->if_hwassist = BCE_IF_HWASSIST;
778 		ifp->if_capabilities = BCE_IF_CAPABILITIES;
779 	}
780 
781 	ifp->if_capenable    = ifp->if_capabilities;
782 
783 	/* Use standard mbuf sizes for buffer allocation. */
784 	sc->rx_bd_mbuf_alloc_size = MHLEN;
785 	sc->pg_bd_mbuf_alloc_size = MCLBYTES;
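	/*
	 * (Sketch of the intent behind the two sizes, based on the separate
	 * RX and page buffer chains managed by this driver: frame headers
	 * are received into the small MHLEN mbufs posted to the RX chain,
	 * while any remaining payload is placed in the MCLBYTES buffers
	 * posted to the page chain.)
	 */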
786 
787 #ifdef DEVICE_POLLING
788 	ifp->if_capabilities |= IFCAP_POLLING;
789 #endif
790 
791 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
792 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
793 	IFQ_SET_READY(&ifp->if_snd);
794 
795 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
796 		ifp->if_baudrate = IF_Mbps(2500ULL);
797 	else
798 		ifp->if_baudrate = IF_Mbps(1000);
799 
800 	/* Check for an MII child bus by probing the PHY. */
801 	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
802 		bce_ifmedia_sts)) {
803 		BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
804 			__FILE__, __LINE__);
805 		rc = ENXIO;
806 		goto bce_attach_fail;
807 	}
808 
809 	/* Attach to the Ethernet interface list. */
810 	ether_ifattach(ifp, sc->eaddr);
811 
812 #if __FreeBSD_version < 500000
813 	callout_init(&sc->bce_tick_callout);
814 	callout_init(&sc->bce_pulse_callout);
815 #else
816 	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
817 	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
818 #endif
819 
820 	/* Hookup IRQ last. */
821 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL,
822 	   bce_intr, sc, &sc->bce_intrhand);
823 
824 	if (rc) {
825 		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
826 			__FILE__, __LINE__);
827 		bce_detach(dev);
828 		goto bce_attach_exit;
829 	}
830 
831 	/*
832 	 * At this point we've acquired all the resources
833 	 * we need to run so there's no turning back, we're
834 	 * cleared for launch.
835 	 */
836 
837 	/* Print some important debugging info. */
838 	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
839 
840 	/* Add the supported sysctls to the kernel. */
841 	bce_add_sysctls(sc);
842 
843 	BCE_LOCK(sc);
844 	/*
845 	 * The chip reset earlier notified the bootcode that
846 	 * a driver is present.  We now need to start our pulse
847 	 * routine so that the bootcode is reminded that we're
848 	 * still running.
849 	 */
850 	bce_pulse(sc);
851 
852 	bce_mgmt_init_locked(sc);
853 	BCE_UNLOCK(sc);
854 
855 	/* Finally, print some useful adapter info */
856 	BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
857 	printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
858 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
859 	printf("Bus (PCI%s, %s, %dMHz); ",
860 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
861 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
862 		sc->bus_speed_mhz);
863 	printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
864 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
865 		printf("MFW ");
866 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
867 		printf("MSI ");
868 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
869 		printf("2.5G ");
870 	printf(")\n");
871 
872 	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
873 		__FUNCTION__, sc);
874 
875 	goto bce_attach_exit;
876 
877 bce_attach_fail:
878 	bce_release_resources(sc);
879 
880 bce_attach_exit:
881 
882 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
883 
884 	return(rc);
885 }
886 
887 
888 /****************************************************************************/
889 /* Device detach function.                                                  */
890 /*                                                                          */
891 /* Stops the controller, resets the controller, and releases resources.     */
892 /*                                                                          */
893 /* Returns:                                                                 */
894 /*   0 on success, positive value on failure.                               */
895 /****************************************************************************/
896 static int
897 bce_detach(device_t dev)
898 {
899 	struct bce_softc *sc = device_get_softc(dev);
900 	struct ifnet *ifp;
901 	u32 msg;
902 
903 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
904 
905 	ifp = sc->bce_ifp;
906 
907 #ifdef DEVICE_POLLING
908 	if (ifp->if_capenable & IFCAP_POLLING)
909 		ether_poll_deregister(ifp);
910 #endif
911 
912 	/* Stop and reset the controller. */
913 	BCE_LOCK(sc);
914 
915 	/* Stop the pulse so the bootcode can go to driver absent state. */
916 	callout_stop(&sc->bce_pulse_callout);
917 
918 	bce_stop(sc);
919 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
920 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
921 	else
922 		msg = BCE_DRV_MSG_CODE_UNLOAD;
923 	bce_reset(sc, msg);
924 
925 	BCE_UNLOCK(sc);
926 
927 	ether_ifdetach(ifp);
928 
929 	/* If we have a child device on the MII bus remove it too. */
930 	bus_generic_detach(dev);
931 	device_delete_child(dev, sc->bce_miibus);
932 
933 	/* Release all remaining resources. */
934 	bce_release_resources(sc);
935 
936 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
937 
938 	return(0);
939 }
940 
941 
942 /****************************************************************************/
943 /* Device shutdown function.                                                */
944 /*                                                                          */
945 /* Stops and resets the controller.                                         */
946 /*                                                                          */
947 /* Returns:                                                                 */
948 /*   0 on success, positive value on failure.                               */
949 /****************************************************************************/
950 static int
951 bce_shutdown(device_t dev)
952 {
953 	struct bce_softc *sc = device_get_softc(dev);
954 	u32 msg;
955 
956 	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Entering %s()\n", __FUNCTION__);
957 
958 	BCE_LOCK(sc);
959 	bce_stop(sc);
960 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
961 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
962 	else
963 		msg = BCE_DRV_MSG_CODE_UNLOAD;
964 	bce_reset(sc, msg);
965 	BCE_UNLOCK(sc);
966 
967 	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Exiting %s()\n", __FUNCTION__);
968 
969 	return (0);
970 }
971 
972 
973 /****************************************************************************/
974 /* Indirect register read.                                                  */
975 /*                                                                          */
976 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
977 /* configuration space.  Using this mechanism avoids issues with posted     */
978 /* reads but is much slower than memory-mapped I/O.                         */
979 /*                                                                          */
980 /* Returns:                                                                 */
981 /*   The value of the register.                                             */
982 /****************************************************************************/
983 static u32
984 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
985 {
986 	device_t dev;
987 	dev = sc->bce_dev;
988 
989 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
990 #ifdef BCE_DEBUG
991 	{
992 		u32 val;
993 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
994 		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
995 			__FUNCTION__, offset, val);
996 		return val;
997 	}
998 #else
999 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1000 #endif
1001 }
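
/*
 * Usage note: the rest of the driver reaches this accessor through the
 * REG_RD_IND() macro (see the shared memory probing in bce_attach() above),
 * so the window address register is reprogrammed on every indirect read.
 */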
1002 
1003 
1004 /****************************************************************************/
1005 /* Indirect register write.                                                 */
1006 /*                                                                          */
1007 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1008 /* configuration space.  Using this mechanism avoids issues with posted     */
1009 /* writes but is much slower than memory-mapped I/O.                        */
1010 /*                                                                          */
1011 /* Returns:                                                                 */
1012 /*   Nothing.                                                               */
1013 /****************************************************************************/
1014 static void
1015 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1016 {
1017 	device_t dev;
1018 	dev = sc->bce_dev;
1019 
1020 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1021 		__FUNCTION__, offset, val);
1022 
1023 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1024 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1025 }
1026 
1027 
1028 #ifdef BCE_DEBUG
1029 /****************************************************************************/
1030 /* Context memory read.                                                     */
1031 /*                                                                          */
1032 /* The NetXtreme II controller uses context memory to track connection      */
1033 /* information for L2 and higher network protocols.                         */
1034 /*                                                                          */
1035 /* Returns:                                                                 */
1036 /*   The requested 32 bit value of context memory.                          */
1037 /****************************************************************************/
1038 static u32
1039 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 offset)
1040 {
1041 	u32 val;
1042 
1043 	offset += cid_addr;
1044 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1045 	val = REG_RD(sc, BCE_CTX_DATA);
1046 
1047 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1048 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1049 
1050 	return(val);
1051 }
1052 #endif
1053 
1054 
1055 /****************************************************************************/
1056 /* Context memory write.                                                    */
1057 /*                                                                          */
1058 /* The NetXtreme II controller uses context memory to track connection      */
1059 /* information for L2 and higher network protocols.                         */
1060 /*                                                                          */
1061 /* Returns:                                                                 */
1062 /*   Nothing.                                                               */
1063 /****************************************************************************/
1064 static void
1065 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
1066 {
1067 
1068 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1069 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1070 
1071 	offset += cid_addr;
1072 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1073 	REG_WR(sc, BCE_CTX_DATA, val);
1074 }
1075 
1076 
1077 /****************************************************************************/
1078 /* PHY register read.                                                       */
1079 /*                                                                          */
1080 /* Implements register reads on the MII bus.                                */
1081 /*                                                                          */
1082 /* Returns:                                                                 */
1083 /*   The value of the register.                                             */
1084 /****************************************************************************/
1085 static int
1086 bce_miibus_read_reg(device_t dev, int phy, int reg)
1087 {
1088 	struct bce_softc *sc;
1089 	u32 val;
1090 	int i;
1091 
1092 	sc = device_get_softc(dev);
1093 
1094 	/* Make sure we are accessing the correct PHY address. */
1095 	if (phy != sc->bce_phy_addr) {
1096 		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1097 		return(0);
1098 	}
1099 
1100 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1101 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1102 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1103 
1104 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1105 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1106 
1107 		DELAY(40);
1108 	}
1109 
1110 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1111 		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1112 		BCE_EMAC_MDIO_COMM_START_BUSY;
1113 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1114 
1115 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1116 		DELAY(10);
1117 
1118 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1119 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1120 			DELAY(5);
1121 
1122 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1123 			val &= BCE_EMAC_MDIO_COMM_DATA;
1124 
1125 			break;
1126 		}
1127 	}
1128 
1129 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1130 		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1131 			__FILE__, __LINE__, phy, reg);
1132 		val = 0x0;
1133 	} else {
1134 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1135 	}
1136 
1137 	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1138 		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1139 
1140 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1141 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1142 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1143 
1144 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1145 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1146 
1147 		DELAY(40);
1148 	}
1149 
1150 	return (val & 0xffff);
1151 
1152 }
1153 
1154 
1155 /****************************************************************************/
1156 /* PHY register write.                                                      */
1157 /*                                                                          */
1158 /* Implements register writes on the MII bus.                               */
1159 /*                                                                          */
1160 /* Returns:                                                                 */
1161 /*   0 on success; write timeouts are logged but not returned as errors.   */
1162 /****************************************************************************/
1163 static int
1164 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1165 {
1166 	struct bce_softc *sc;
1167 	u32 val1;
1168 	int i;
1169 
1170 	sc = device_get_softc(dev);
1171 
1172 	/* Make sure we are accessing the correct PHY address. */
1173 	if (phy != sc->bce_phy_addr) {
1174 		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1175 		return(0);
1176 	}
1177 
1178 	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1179 		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1180 
1181 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1182 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1183 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1184 
1185 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1186 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1187 
1188 		DELAY(40);
1189 	}
1190 
1191 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1192 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1193 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1194 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1195 
1196 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1197 		DELAY(10);
1198 
1199 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1200 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1201 			DELAY(5);
1202 			break;
1203 		}
1204 	}
1205 
1206 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1207 		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1208 			__FILE__, __LINE__);
1209 
1210 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1211 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1212 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1213 
1214 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1215 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1216 
1217 		DELAY(40);
1218 	}
1219 
1220 	return 0;
1221 }
1222 
1223 
1224 /****************************************************************************/
1225 /* MII bus status change.                                                   */
1226 /*                                                                          */
1227 /* Called by the MII bus driver when the PHY establishes link to set the    */
1228 /* MAC interface registers.                                                 */
1229 /*                                                                          */
1230 /* Returns:                                                                 */
1231 /*   Nothing.                                                               */
1232 /****************************************************************************/
1233 static void
1234 bce_miibus_statchg(device_t dev)
1235 {
1236 	struct bce_softc *sc;
1237 	struct mii_data *mii;
1238 	int val;
1239 
1240 	sc = device_get_softc(dev);
1241 
1242 	mii = device_get_softc(sc->bce_miibus);
1243 
1244 	val = REG_RD(sc, BCE_EMAC_MODE);
1245 	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1246 		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1247 		BCE_EMAC_MODE_25G);
1248 
1249 	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
1250 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1251 	case IFM_10_T:
1252 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1253 			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1254 			val |= BCE_EMAC_MODE_PORT_MII_10;
1255 			break;
1256 		}
1257 		/* fall-through */
1258 	case IFM_100_TX:
1259 		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1260 		val |= BCE_EMAC_MODE_PORT_MII;
1261 		break;
1262 	case IFM_2500_SX:
1263 		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1264 		val |= BCE_EMAC_MODE_25G;
1265 		/* fall-through */
1266 	case IFM_1000_T:
1267 	case IFM_1000_SX:
1268 		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1269 		val |= BCE_EMAC_MODE_PORT_GMII;
1270 		break;
1271 	default:
1272 		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1273 			"interface.\n");
1274 		val |= BCE_EMAC_MODE_PORT_GMII;
1275 	}
1276 
1277 	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1278 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1279 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1280 		val |= BCE_EMAC_MODE_HALF_DUPLEX;
1281 	} else
1282 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1283 
1284 	REG_WR(sc, BCE_EMAC_MODE, val);
1285 
1286 #if 0
1287 	/* ToDo: Enable flow control support in brgphy and bge. */
1288 	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1289 	if (mii->mii_media_active & IFM_FLAG0)
1290 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1291 	if (mii->mii_media_active & IFM_FLAG1)
1292 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1293 #endif
1294 
1295 }
1296 
1297 
1298 /****************************************************************************/
1299 /* Acquire NVRAM lock.                                                      */
1300 /*                                                                          */
1301 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1302 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is reserved   */
1303 /* for use by the driver.                                                    */
1304 /*                                                                          */
1305 /* Returns:                                                                 */
1306 /*   0 on success, positive value on failure.                               */
1307 /****************************************************************************/
1308 static int
1309 bce_acquire_nvram_lock(struct bce_softc *sc)
1310 {
1311 	u32 val;
1312 	int j;
1313 
1314 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Acquiring NVRAM lock.\n");
1315 
1316 	/* Request access to the flash interface. */
1317 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1318 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1319 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1320 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1321 			break;
1322 
1323 		DELAY(5);
1324 	}
1325 
1326 	if (j >= NVRAM_TIMEOUT_COUNT) {
1327 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1328 		return EBUSY;
1329 	}
1330 
1331 	return 0;
1332 }
1333 
1334 
1335 /****************************************************************************/
1336 /* Release NVRAM lock.                                                      */
1337 /*                                                                          */
1338 /* When the caller is finished accessing NVRAM the lock must be released.   */
1339 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is reserved   */
1340 /* for use by the driver.                                                    */
1341 /*                                                                          */
1342 /* Returns:                                                                 */
1343 /*   0 on success, positive value on failure.                               */
1344 /****************************************************************************/
1345 static int
1346 bce_release_nvram_lock(struct bce_softc *sc)
1347 {
1348 	int j;
1349 	u32 val;
1350 
1351 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Releasing NVRAM lock.\n");
1352 
1353 	/*
1354 	 * Relinquish the NVRAM interface.
1355 	 */
1356 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1357 
1358 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1359 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1360 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1361 			break;
1362 
1363 		DELAY(5);
1364 	}
1365 
1366 	if (j >= NVRAM_TIMEOUT_COUNT) {
1367 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1368 		return EBUSY;
1369 	}
1370 
1371 	return 0;
1372 }
1373 
1374 
1375 #ifdef BCE_NVRAM_WRITE_SUPPORT
1376 /****************************************************************************/
1377 /* Enable NVRAM write access.                                               */
1378 /*                                                                          */
1379 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1380 /*                                                                          */
1381 /* Returns:                                                                 */
1382 /*   0 on success, positive value on failure.                               */
1383 /****************************************************************************/
1384 static int
1385 bce_enable_nvram_write(struct bce_softc *sc)
1386 {
1387 	u32 val;
1388 
1389 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM write.\n");
1390 
1391 	val = REG_RD(sc, BCE_MISC_CFG);
1392 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1393 
1394 	if (!sc->bce_flash_info->buffered) {
1395 		int j;
1396 
1397 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1398 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1399 
1400 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1401 			DELAY(5);
1402 
1403 			val = REG_RD(sc, BCE_NVM_COMMAND);
1404 			if (val & BCE_NVM_COMMAND_DONE)
1405 				break;
1406 		}
1407 
1408 		if (j >= NVRAM_TIMEOUT_COUNT) {
1409 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1410 			return EBUSY;
1411 		}
1412 	}
1413 	return 0;
1414 }
1415 
1416 
1417 /****************************************************************************/
1418 /* Disable NVRAM write access.                                              */
1419 /*                                                                          */
1420 /* When the caller is finished writing to NVRAM write access must be        */
1421 /* disabled.                                                                */
1422 /*                                                                          */
1423 /* Returns:                                                                 */
1424 /*   Nothing.                                                               */
1425 /****************************************************************************/
1426 static void
1427 bce_disable_nvram_write(struct bce_softc *sc)
1428 {
1429 	u32 val;
1430 
1431 	DBPRINT(sc, BCE_VERBOSE_NVRAM,  "Disabling NVRAM write.\n");
1432 
1433 	val = REG_RD(sc, BCE_MISC_CFG);
1434 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1435 }
1436 #endif
1437 
1438 
1439 /****************************************************************************/
1440 /* Enable NVRAM access.                                                     */
1441 /*                                                                          */
1442 /* Before accessing NVRAM for read or write operations the caller must      */
1443 /* enable NVRAM access.                                                     */
1444 /*                                                                          */
1445 /* Returns:                                                                 */
1446 /*   Nothing.                                                               */
1447 /****************************************************************************/
1448 static void
1449 bce_enable_nvram_access(struct bce_softc *sc)
1450 {
1451 	u32 val;
1452 
1453 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM access.\n");
1454 
1455 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1456 	/* Enable both bits, even on read. */
1457 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1458 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1459 }
1460 
1461 
1462 /****************************************************************************/
1463 /* Disable NVRAM access.                                                    */
1464 /*                                                                          */
1465 /* When the caller is finished accessing NVRAM access must be disabled.     */
1466 /*                                                                          */
1467 /* Returns:                                                                 */
1468 /*   Nothing.                                                               */
1469 /****************************************************************************/
1470 static void
1471 bce_disable_nvram_access(struct bce_softc *sc)
1472 {
1473 	u32 val;
1474 
1475 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM access.\n");
1476 
1477 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1478 
1479 	/* Disable both bits, even after read. */
1480 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1481 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1482 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1483 }
1484 
1485 
1486 #ifdef BCE_NVRAM_WRITE_SUPPORT
1487 /****************************************************************************/
1488 /* Erase NVRAM page before writing.                                         */
1489 /*                                                                          */
1490 /* Non-buffered flash parts require that a page be erased before it is      */
1491 /* written.                                                                 */
1492 /*                                                                          */
1493 /* Returns:                                                                 */
1494 /*   0 on success, positive value on failure.                               */
1495 /****************************************************************************/
1496 static int
1497 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1498 {
1499 	u32 cmd;
1500 	int j;
1501 
1502 	/* Buffered flash doesn't require an erase. */
1503 	if (sc->bce_flash_info->buffered)
1504 		return 0;
1505 
1506 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Erasing NVRAM page.\n");
1507 
1508 	/* Build an erase command. */
1509 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1510 	      BCE_NVM_COMMAND_DOIT;
1511 
1512 	/*
1513 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1514 	 * and issue the erase command.
1515 	 */
1516 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1517 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1518 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1519 
1520 	/* Wait for completion. */
1521 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1522 		u32 val;
1523 
1524 		DELAY(5);
1525 
1526 		val = REG_RD(sc, BCE_NVM_COMMAND);
1527 		if (val & BCE_NVM_COMMAND_DONE)
1528 			break;
1529 	}
1530 
1531 	if (j >= NVRAM_TIMEOUT_COUNT) {
1532 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1533 		return EBUSY;
1534 	}
1535 
1536 	return 0;
1537 }
1538 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1539 
1540 
1541 /****************************************************************************/
1542 /* Read a dword (32 bits) from NVRAM.                                       */
1543 /*                                                                          */
1544 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1545 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1546 /*                                                                          */
1547 /* Returns:                                                                 */
1548 /*   0 on success and the 32 bit value read, positive value on failure.     */
1549 /****************************************************************************/
1550 static int
1551 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1552 							u32 cmd_flags)
1553 {
1554 	u32 cmd;
1555 	int i, rc = 0;
1556 
1557 	/* Build the command word. */
1558 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1559 
1560 	/* Calculate the offset for buffered flash. */
1561 	if (sc->bce_flash_info->buffered) {
1562 		offset = ((offset / sc->bce_flash_info->page_size) <<
1563 			   sc->bce_flash_info->page_bits) +
1564 			  (offset % sc->bce_flash_info->page_size);
1565 	}
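	/*
	 * Example (using values typical of the buffered parts in
	 * flash_table, e.g. a 264 byte page addressed on a 9 bit page
	 * boundary): a linear offset of 300 falls in page 1 at byte 36,
	 * so the device address becomes (1 << 9) + 36 = 0x224.
	 */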
1566 
1567 	/*
1568 	 * Clear the DONE bit separately, set the address to read,
1569 	 * and issue the read.
1570 	 */
1571 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1572 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1573 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1574 
1575 	/* Wait for completion. */
1576 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1577 		u32 val;
1578 
1579 		DELAY(5);
1580 
1581 		val = REG_RD(sc, BCE_NVM_COMMAND);
1582 		if (val & BCE_NVM_COMMAND_DONE) {
1583 			val = REG_RD(sc, BCE_NVM_READ);
1584 
1585 			val = bce_be32toh(val);
1586 			memcpy(ret_val, &val, 4);
1587 			break;
1588 		}
1589 	}
1590 
1591 	/* Check for errors. */
1592 	if (i >= NVRAM_TIMEOUT_COUNT) {
1593 		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1594 			__FILE__, __LINE__, offset);
1595 		rc = EBUSY;
1596 	}
1597 
1598 	return(rc);
1599 }
1600 
1601 
1602 #ifdef BCE_NVRAM_WRITE_SUPPORT
1603 /****************************************************************************/
1604 /* Write a dword (32 bits) to NVRAM.                                        */
1605 /*                                                                          */
1606 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1607 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1608 /* enabled NVRAM write access.                                              */
1609 /*                                                                          */
1610 /* Returns:                                                                 */
1611 /*   0 on success, positive value on failure.                               */
1612 /****************************************************************************/
1613 static int
1614 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1615 	u32 cmd_flags)
1616 {
1617 	u32 cmd, val32;
1618 	int j;
1619 
1620 	/* Build the command word. */
1621 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1622 
1623 	/* Calculate the offset for buffered flash. */
1624 	if (sc->bce_flash_info->buffered) {
1625 		offset = ((offset / sc->bce_flash_info->page_size) <<
1626 			  sc->bce_flash_info->page_bits) +
1627 			 (offset % sc->bce_flash_info->page_size);
1628 	}
1629 
1630 	/*
1631 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1632 	 * set the NVRAM address to write, and issue the write command
1633 	 */
1634 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1635 	memcpy(&val32, val, 4);
1636 	val32 = htobe32(val32);
1637 	REG_WR(sc, BCE_NVM_WRITE, val32);
1638 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1639 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1640 
1641 	/* Wait for completion. */
1642 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1643 		DELAY(5);
1644 
1645 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1646 			break;
1647 	}
1648 	if (j >= NVRAM_TIMEOUT_COUNT) {
1649 		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1650 			__FILE__, __LINE__, offset);
1651 		return EBUSY;
1652 	}
1653 
1654 	return 0;
1655 }
1656 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1657 
1658 
1659 /****************************************************************************/
1660 /* Initialize NVRAM access.                                                 */
1661 /*                                                                          */
1662 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1663 /* access that device.                                                      */
1664 /*                                                                          */
1665 /* Returns:                                                                 */
1666 /*   0 on success, positive value on failure.                               */
1667 /****************************************************************************/
1668 static int
1669 bce_init_nvram(struct bce_softc *sc)
1670 {
1671 	u32 val;
1672 	int j, entry_count, rc;
1673 	struct flash_spec *flash;
1674 
1675 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Entering %s()\n", __FUNCTION__);
1676 
1677 	/* Determine the selected interface. */
1678 	val = REG_RD(sc, BCE_NVM_CFG1);
1679 
1680 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1681 
1682 	rc = 0;
1683 
1684 	/*
1685 	 * Flash reconfiguration is required to support additional
1686 	 * NVRAM devices not directly supported in hardware.
1687 	 * Check if the flash interface was reconfigured
1688 	 * by the bootcode.
1689 	 */
1690 
1691 	if (val & 0x40000000) {
1692 		/* Flash interface reconfigured by bootcode. */
1693 
1694 		DBPRINT(sc,BCE_INFO_LOAD,
1695 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1696 
1697 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1698 		     j++, flash++) {
1699 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1700 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1701 				sc->bce_flash_info = flash;
1702 				break;
1703 			}
1704 		}
1705 	} else {
1706 		/* Flash interface not yet reconfigured. */
1707 		u32 mask;
1708 
1709 		DBPRINT(sc,BCE_INFO_LOAD,
1710 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1711 
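		/*
		 * Bit 23 of NVM_CFG1 selects between the primary and the
		 * backup flash strapping options, so pick the matching
		 * strap mask before scanning flash_table.
		 */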
1712 		if (val & (1 << 23))
1713 			mask = FLASH_BACKUP_STRAP_MASK;
1714 		else
1715 			mask = FLASH_STRAP_MASK;
1716 
1717 		/* Look for the matching NVRAM device configuration data. */
1718 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1719 
1720 			/* Check if the device matches any of the known devices. */
1721 			if ((val & mask) == (flash->strapping & mask)) {
1722 				/* Found a device match. */
1723 				sc->bce_flash_info = flash;
1724 
1725 				/* Request access to the flash interface. */
1726 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1727 					return rc;
1728 
1729 				/* Reconfigure the flash interface. */
1730 				bce_enable_nvram_access(sc);
1731 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1732 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1733 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1734 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1735 				bce_disable_nvram_access(sc);
1736 				bce_release_nvram_lock(sc);
1737 
1738 				break;
1739 			}
1740 		}
1741 	}
1742 
1743 	/* Check if a matching device was found; bail out if not. */
1744 	if (j == entry_count) {
1745 		sc->bce_flash_info = NULL;
1746 		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
1747 			__FILE__, __LINE__);
1748 		return ENODEV;
1749 	}
1750 
1751 	/* Write the flash config data to the shared memory interface. */
1752 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1753 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1754 	if (val)
1755 		sc->bce_flash_size = val;
1756 	else
1757 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1758 
1759 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1760 		sc->bce_flash_info->total_size);
1761 
1762 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Exiting %s()\n", __FUNCTION__);
1763 
1764 	return rc;
1765 }
1766 
1767 
1768 /****************************************************************************/
1769 /* Read an arbitrary range of data from NVRAM.                              */
1770 /*                                                                          */
1771 /* Prepares the NVRAM interface for access and reads the requested data     */
1772 /* into the supplied buffer.                                                */
1773 /*                                                                          */
1774 /* Returns:                                                                 */
1775 /*   0 on success and the data read, positive value on failure.             */
1776 /****************************************************************************/
1777 static int
1778 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1779 	int buf_size)
1780 {
1781 	int rc = 0;
1782 	u32 cmd_flags, offset32, len32, extra;
1783 
1784 	if (buf_size == 0)
1785 		return 0;
1786 
1787 	/* Request access to the flash interface. */
1788 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1789 		return rc;
1790 
1791 	/* Enable access to flash interface */
1792 	bce_enable_nvram_access(sc);
1793 
1794 	len32 = buf_size;
1795 	offset32 = offset;
1796 	extra = 0;
1797 
1798 	cmd_flags = 0;
1799 
1800 	if (offset32 & 3) {
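	/*
	 * NVRAM is accessed one dword at a time, so an unaligned request
	 * is handled in up to three phases: a leading partial dword, a run
	 * of whole dwords, and a trailing partial dword.  The FIRST/LAST
	 * command flags bracket the overall access.
	 */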
1801 		u8 buf[4];
1802 		u32 pre_len;
1803 
1804 		offset32 &= ~3;
1805 		pre_len = 4 - (offset & 3);
1806 
1807 		if (pre_len >= len32) {
1808 			pre_len = len32;
1809 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1810 		}
1811 		else {
1812 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1813 		}
1814 
1815 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1816 
1817 		if (rc)
1818 			return rc;
1819 
1820 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1821 
1822 		offset32 += 4;
1823 		ret_buf += pre_len;
1824 		len32 -= pre_len;
1825 	}
1826 
1827 	if (len32 & 3) {
1828 		extra = 4 - (len32 & 3);
1829 		len32 = (len32 + 4) & ~3;
1830 	}
1831 
1832 	if (len32 == 4) {
1833 		u8 buf[4];
1834 
1835 		if (cmd_flags)
1836 			cmd_flags = BCE_NVM_COMMAND_LAST;
1837 		else
1838 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1839 				    BCE_NVM_COMMAND_LAST;
1840 
1841 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1842 
1843 		memcpy(ret_buf, buf, 4 - extra);
1844 	}
1845 	else if (len32 > 0) {
1846 		u8 buf[4];
1847 
1848 		/* Read the first word. */
1849 		if (cmd_flags)
1850 			cmd_flags = 0;
1851 		else
1852 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1853 
1854 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1855 
1856 		/* Advance to the next dword. */
1857 		offset32 += 4;
1858 		ret_buf += 4;
1859 		len32 -= 4;
1860 
1861 		while (len32 > 4 && rc == 0) {
1862 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1863 
1864 			/* Advance to the next dword. */
1865 			offset32 += 4;
1866 			ret_buf += 4;
1867 			len32 -= 4;
1868 		}
1869 
1870 		if (rc)
1871 			return rc;
1872 
1873 		cmd_flags = BCE_NVM_COMMAND_LAST;
1874 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1875 
1876 		memcpy(ret_buf, buf, 4 - extra);
1877 	}
1878 
1879 	/* Disable access to flash interface and release the lock. */
1880 	bce_disable_nvram_access(sc);
1881 	bce_release_nvram_lock(sc);
1882 
1883 	return rc;
1884 }
1885 
1886 
1887 #ifdef BCE_NVRAM_WRITE_SUPPORT
1888 /****************************************************************************/
1889 /* Write an arbitrary range of data to NVRAM.                               */
1890 /*                                                                          */
1891 /* Prepares the NVRAM interface for write access and writes the requested   */
1892 /* data from the supplied buffer.  The caller is responsible for            */
1893 /* calculating any appropriate CRCs.                                        */
1894 /*                                                                          */
1895 /* Returns:                                                                 */
1896 /*   0 on success, positive value on failure.                               */
1897 /****************************************************************************/
1898 static int
1899 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1900 	int buf_size)
1901 {
1902 	u32 written, offset32, len32;
1903 	u8 *buf, *align_buf = NULL, start[4], end[4];
1904 	int rc = 0;
1905 	int align_start, align_end;
1906 
1907 	buf = data_buf;
1908 	offset32 = offset;
1909 	len32 = buf_size;
1910 	align_start = align_end = 0;
1911 
1912 	if ((align_start = (offset32 & 3))) {
1913 		offset32 &= ~3;
1914 		len32 += align_start;
1915 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1916 			return rc;
1917 	}
1918 
1919 	if (len32 & 3) {
1920 		if ((len32 > 4) || !align_start) {
1921 			align_end = 4 - (len32 & 3);
1922 			len32 += align_end;
1923 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1924 				end, 4))) {
1925 				return rc;
1926 			}
1927 		}
1928 	}
1929 
1930 	if (align_start || align_end) {
1931 		align_buf = buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1932 		if (buf == NULL)
1933 			return ENOMEM;
1934 		if (align_start) {
1935 			memcpy(buf, start, 4);
1936 		}
1937 		if (align_end) {
1938 			memcpy(buf + len32 - 4, end, 4);
1939 		}
1940 		memcpy(buf + align_start, data_buf, buf_size);
1941 	}
1942 
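	/*
	 * Write the data one flash page at a time.  Non-buffered parts
	 * need a read/erase/modify/write cycle: the existing page is read
	 * into flash_buffer, the page is erased, and then the preserved
	 * and new data are written back a dword at a time.  Note that buf
	 * is advanced while the new data is written; align_buf keeps the
	 * original allocation so it can be freed at nvram_write_end.
	 */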
1943 	written = 0;
1944 	while ((written < len32) && (rc == 0)) {
1945 		u32 page_start, page_end, data_start, data_end;
1946 		u32 addr, cmd_flags;
1947 		int i;
1948 		u8 flash_buffer[264];
1949 
1950 		/* Find the page_start addr */
1951 		page_start = offset32 + written;
1952 		page_start -= (page_start % sc->bce_flash_info->page_size);
1953 		/* Find the page_end addr */
1954 		page_end = page_start + sc->bce_flash_info->page_size;
1955 		/* Find the data_start addr */
1956 		data_start = (written == 0) ? offset32 : page_start;
1957 		/* Find the data_end addr */
1958 		data_end = (page_end > offset32 + len32) ?
1959 			(offset32 + len32) : page_end;
1960 
1961 		/* Request access to the flash interface. */
1962 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1963 			goto nvram_write_end;
1964 
1965 		/* Enable access to flash interface */
1966 		bce_enable_nvram_access(sc);
1967 
1968 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1969 		if (sc->bce_flash_info->buffered == 0) {
1970 			int j;
1971 
1972 			/* Read the whole page into the buffer
1973 			 * (non-buffered flash only) */
1974 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1975 				if (j == (sc->bce_flash_info->page_size - 4)) {
1976 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1977 				}
1978 				rc = bce_nvram_read_dword(sc,
1979 					page_start + j,
1980 					&flash_buffer[j],
1981 					cmd_flags);
1982 
1983 				if (rc)
1984 					goto nvram_write_end;
1985 
1986 				cmd_flags = 0;
1987 			}
1988 		}
1989 
1990 		/* Enable writes to flash interface (unlock write-protect) */
1991 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1992 			goto nvram_write_end;
1993 
1994 		/* Erase the page */
1995 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1996 			goto nvram_write_end;
1997 
1998 		/* Re-enable the write again for the actual write */
1999 		bce_enable_nvram_write(sc);
2000 
2001 		/* Loop to write back the buffer data from page_start to
2002 		 * data_start */
2003 		i = 0;
2004 		if (sc->bce_flash_info->buffered == 0) {
2005 			for (addr = page_start; addr < data_start;
2006 				addr += 4, i += 4) {
2007 
2008 				rc = bce_nvram_write_dword(sc, addr,
2009 					&flash_buffer[i], cmd_flags);
2010 
2011 				if (rc != 0)
2012 					goto nvram_write_end;
2013 
2014 				cmd_flags = 0;
2015 			}
2016 		}
2017 
2018 		/* Loop to write the new data from data_start to data_end */
2019 		for (addr = data_start; addr < data_end; addr += 4, i++) {
2020 			if ((addr == page_end - 4) ||
2021 				((sc->bce_flash_info->buffered) &&
2022 				 (addr == data_end - 4))) {
2023 
2024 				cmd_flags |= BCE_NVM_COMMAND_LAST;
2025 			}
2026 			rc = bce_nvram_write_dword(sc, addr, buf,
2027 				cmd_flags);
2028 
2029 			if (rc != 0)
2030 				goto nvram_write_end;
2031 
2032 			cmd_flags = 0;
2033 			buf += 4;
2034 		}
2035 
2036 		/* Loop to write back the buffer data from data_end
2037 		 * to page_end */
2038 		if (sc->bce_flash_info->buffered == 0) {
2039 			for (addr = data_end; addr < page_end;
2040 				addr += 4, i += 4) {
2041 
2042 				if (addr == page_end - 4) {
2043 					cmd_flags = BCE_NVM_COMMAND_LAST;
2044 				}
2045 				rc = bce_nvram_write_dword(sc, addr,
2046 					&flash_buffer[i], cmd_flags);
2047 
2048 				if (rc != 0)
2049 					goto nvram_write_end;
2050 
2051 				cmd_flags = 0;
2052 			}
2053 		}
2054 
2055 		/* Disable writes to flash interface (lock write-protect) */
2056 		bce_disable_nvram_write(sc);
2057 
2058 		/* Disable access to flash interface */
2059 		bce_disable_nvram_access(sc);
2060 		bce_release_nvram_lock(sc);
2061 
2062 		/* Increment written */
2063 		written += data_end - data_start;
2064 	}
2065 
2066 nvram_write_end:
2067 	if (align_buf != NULL)
2068 		free(align_buf, M_DEVBUF);
2069 
2070 	return rc;
2071 }
2072 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2073 
2074 
2075 /****************************************************************************/
2076 /* Verifies that NVRAM is accessible and contains valid data.               */
2077 /*                                                                          */
2078 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2079 /* correct.                                                                 */
2080 /*                                                                          */
2081 /* Returns:                                                                 */
2082 /*   0 on success, positive value on failure.                               */
2083 /****************************************************************************/
2084 static int
2085 bce_nvram_test(struct bce_softc *sc)
2086 {
2087 	u32 buf[BCE_NVRAM_SIZE / 4];
2088 	u8 *data = (u8 *) buf;
2089 	int rc = 0;
2090 	u32 magic, csum;
2091 
2092 
2093 	/*
2094 	 * Check that the device NVRAM is valid by reading
2095 	 * the magic value at offset 0.
2096 	 */
2097 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
2098 		goto bce_nvram_test_done;
2099 
2100 
2101 	magic = bce_be32toh(buf[0]);
2102 	if (magic != BCE_NVRAM_MAGIC) {
2103 		rc = ENODEV;
2104 		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2105 			"Found: 0x%08X\n",
2106 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2107 		goto bce_nvram_test_done;
2108 	}
2109 
2110 	/*
2111 	 * Verify that the device NVRAM includes valid
2112 	 * configuration data.
2113 	 */
2114 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
2115 		goto bce_nvram_test_done;
2116 
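	/*
	 * Each 256 byte configuration region includes its own CRC, so
	 * running the CRC over the entire region (data plus stored CRC)
	 * should yield the fixed CRC-32 residual value.
	 */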
2117 	csum = ether_crc32_le(data, 0x100);
2118 	if (csum != BCE_CRC32_RESIDUAL) {
2119 		rc = ENODEV;
2120 		BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2121 			"Expected: 0x%08X, Found: 0x%08X\n",
2122 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2123 		goto bce_nvram_test_done;
2124 	}
2125 
2126 	csum = ether_crc32_le(data + 0x100, 0x100);
2127 	if (csum != BCE_CRC32_RESIDUAL) {
2128 		BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2129 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2130 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2131 		rc = ENODEV;
2132 	}
2133 
2134 bce_nvram_test_done:
2135 	return rc;
2136 }
2137 
2138 
2139 /****************************************************************************/
2140 /* Free any DMA memory owned by the driver.                                 */
2141 /*                                                                          */
2142 /* Scans through each data structure that requires DMA memory and frees     */
2143 /* the memory if allocated.                                                 */
2144 /*                                                                          */
2145 /* Returns:                                                                 */
2146 /*   Nothing.                                                               */
2147 /****************************************************************************/
2148 static void
2149 bce_dma_free(struct bce_softc *sc)
2150 {
2151 	int i;
2152 
2153 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2154 
2155 	/* Destroy the status block. */
2156 	if (sc->status_block != NULL) {
2157 		bus_dmamem_free(
2158 			sc->status_tag,
2159 		    sc->status_block,
2160 		    sc->status_map);
2161 		sc->status_block = NULL;
2162 	}
2163 
2164 	if (sc->status_map != NULL) {
2165 		bus_dmamap_unload(
2166 			sc->status_tag,
2167 		    sc->status_map);
2168 		bus_dmamap_destroy(sc->status_tag,
2169 		    sc->status_map);
2170 		sc->status_map = NULL;
2171 	}
2172 
2173 	if (sc->status_tag != NULL) {
2174 		bus_dma_tag_destroy(sc->status_tag);
2175 		sc->status_tag = NULL;
2176 	}
2177 
2178 
2179 	/* Destroy the statistics block. */
2180 	if (sc->stats_block != NULL) {
2181 		bus_dmamem_free(
2182 			sc->stats_tag,
2183 		    sc->stats_block,
2184 		    sc->stats_map);
2185 		sc->stats_block = NULL;
2186 	}
2187 
2188 	if (sc->stats_map != NULL) {
2189 		bus_dmamap_unload(
2190 			sc->stats_tag,
2191 		    sc->stats_map);
2192 		bus_dmamap_destroy(sc->stats_tag,
2193 		    sc->stats_map);
2194 		sc->stats_map = NULL;
2195 	}
2196 
2197 	if (sc->stats_tag != NULL) {
2198 		bus_dma_tag_destroy(sc->stats_tag);
2199 		sc->stats_tag = NULL;
2200 	}
2201 
2202 
2203 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2204 	for (i = 0; i < TX_PAGES; i++ ) {
2205 		if (sc->tx_bd_chain[i] != NULL) {
2206 			bus_dmamem_free(
2207 				sc->tx_bd_chain_tag,
2208 			    sc->tx_bd_chain[i],
2209 			    sc->tx_bd_chain_map[i]);
2210 			sc->tx_bd_chain[i] = NULL;
2211 		}
2212 
2213 		if (sc->tx_bd_chain_map[i] != NULL) {
2214 			bus_dmamap_unload(
2215 				sc->tx_bd_chain_tag,
2216 		    	sc->tx_bd_chain_map[i]);
2217 			bus_dmamap_destroy(
2218 				sc->tx_bd_chain_tag,
2219 			    sc->tx_bd_chain_map[i]);
2220 			sc->tx_bd_chain_map[i] = NULL;
2221 		}
2222 	}
2223 
2224 	/* Destroy the TX buffer descriptor tag. */
2225 	if (sc->tx_bd_chain_tag != NULL) {
2226 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2227 		sc->tx_bd_chain_tag = NULL;
2228 	}
2229 
2230 
2231 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2232 	for (i = 0; i < RX_PAGES; i++ ) {
2233 		if (sc->rx_bd_chain[i] != NULL) {
2234 			bus_dmamem_free(
2235 				sc->rx_bd_chain_tag,
2236 			    sc->rx_bd_chain[i],
2237 			    sc->rx_bd_chain_map[i]);
2238 			sc->rx_bd_chain[i] = NULL;
2239 		}
2240 
2241 		if (sc->rx_bd_chain_map[i] != NULL) {
2242 			bus_dmamap_unload(
2243 				sc->rx_bd_chain_tag,
2244 		    	sc->rx_bd_chain_map[i]);
2245 			bus_dmamap_destroy(
2246 				sc->rx_bd_chain_tag,
2247 			    sc->rx_bd_chain_map[i]);
2248 			sc->rx_bd_chain_map[i] = NULL;
2249 		}
2250 	}
2251 
2252 	/* Destroy the RX buffer descriptor tag. */
2253 	if (sc->rx_bd_chain_tag != NULL) {
2254 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2255 		sc->rx_bd_chain_tag = NULL;
2256 	}
2257 
2258 
2259 	/* Free, unmap and destroy all page buffer descriptor chain pages. */
2260 	for (i = 0; i < PG_PAGES; i++ ) {
2261 		if (sc->pg_bd_chain[i] != NULL) {
2262 			bus_dmamem_free(
2263 				sc->pg_bd_chain_tag,
2264 			    sc->pg_bd_chain[i],
2265 			    sc->pg_bd_chain_map[i]);
2266 			sc->pg_bd_chain[i] = NULL;
2267 		}
2268 
2269 		if (sc->pg_bd_chain_map[i] != NULL) {
2270 			bus_dmamap_unload(
2271 				sc->pg_bd_chain_tag,
2272 		    	sc->pg_bd_chain_map[i]);
2273 			bus_dmamap_destroy(
2274 				sc->pg_bd_chain_tag,
2275 			    sc->pg_bd_chain_map[i]);
2276 			sc->pg_bd_chain_map[i] = NULL;
2277 		}
2278 	}
2279 
2280 	/* Destroy the page buffer descriptor tag. */
2281 	if (sc->pg_bd_chain_tag != NULL) {
2282 		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2283 		sc->pg_bd_chain_tag = NULL;
2284 	}
2285 
2286 
2287 	/* Unload and destroy the TX mbuf maps. */
2288 	for (i = 0; i < TOTAL_TX_BD; i++) {
2289 		if (sc->tx_mbuf_map[i] != NULL) {
2290 			bus_dmamap_unload(sc->tx_mbuf_tag,
2291 				sc->tx_mbuf_map[i]);
2292 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2293 	 			sc->tx_mbuf_map[i]);
2294 			sc->tx_mbuf_map[i] = NULL;
2295 		}
2296 	}
2297 
2298 	/* Destroy the TX mbuf tag. */
2299 	if (sc->tx_mbuf_tag != NULL) {
2300 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2301 		sc->tx_mbuf_tag = NULL;
2302 	}
2303 
2304 	/* Unload and destroy the RX mbuf maps. */
2305 	for (i = 0; i < TOTAL_RX_BD; i++) {
2306 		if (sc->rx_mbuf_map[i] != NULL) {
2307 			bus_dmamap_unload(sc->rx_mbuf_tag,
2308 				sc->rx_mbuf_map[i]);
2309 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2310 	 			sc->rx_mbuf_map[i]);
2311 			sc->rx_mbuf_map[i] = NULL;
2312 		}
2313 	}
2314 
2315 	/* Destroy the RX mbuf tag. */
2316 	if (sc->rx_mbuf_tag != NULL) {
2317 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2318 		sc->rx_mbuf_tag = NULL;
2319 	}
2320 
2321 	/* Unload and destroy the page mbuf maps. */
2322 	for (i = 0; i < TOTAL_PG_BD; i++) {
2323 		if (sc->pg_mbuf_map[i] != NULL) {
2324 			bus_dmamap_unload(sc->pg_mbuf_tag,
2325 				sc->pg_mbuf_map[i]);
2326 			bus_dmamap_destroy(sc->pg_mbuf_tag,
2327 	 			sc->pg_mbuf_map[i]);
2328 			sc->pg_mbuf_map[i] = NULL;
2329 		}
2330 	}
2331 
2332 	/* Destroy the page mbuf tag. */
2333 	if (sc->pg_mbuf_tag != NULL) {
2334 		bus_dma_tag_destroy(sc->pg_mbuf_tag);
2335 		sc->pg_mbuf_tag = NULL;
2336 	}
2337 
2338 	/* Destroy the parent tag */
2339 	if (sc->parent_tag != NULL) {
2340 		bus_dma_tag_destroy(sc->parent_tag);
2341 		sc->parent_tag = NULL;
2342 	}
2343 
2344 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2345 
2346 }
2347 
2348 
2349 /****************************************************************************/
2350 /* Get DMA memory from the OS.                                              */
2351 /*                                                                          */
2352 /* Validates that the OS has provided DMA buffers in response to a          */
2353 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2354 /* When the callback is used the OS will return 0 for the mapping function  */
2355 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2356 /* failures back to the caller.                                             */
2357 /*                                                                          */
2358 /* Returns:                                                                 */
2359 /*   Nothing.                                                               */
2360 /****************************************************************************/
2361 static void
2362 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2363 {
2364 	bus_addr_t *busaddr = arg;
2365 
2366 	/* Simulate a mapping failure. */
2367 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2368 		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2369 			__FILE__, __LINE__);
2370 		error = ENOMEM);
2371 
2372 	/* Check for an error and signal the caller that an error occurred. */
2373 	if (error) {
2374 		printf("bce %s(%d): DMA mapping error! error = %d, "
2375 		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2376 		*busaddr = 0;
2377 		return;
2378 	}
2379 
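	/*
	 * Every tag loaded through this callback specifies a single
	 * segment, so only the first segment's address needs to be saved.
	 */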
2380 	*busaddr = segs->ds_addr;
2381 	return;
2382 }
2383 
2384 
2385 /****************************************************************************/
2386 /* Allocate any DMA memory needed by the driver.                            */
2387 /*                                                                          */
2388 /* Allocates DMA memory needed for the various global structures needed by  */
2389 /* hardware.                                                                */
2390 /*                                                                          */
2391 /* Returns:                                                                 */
2392 /*   0 for success, positive value for failure.                             */
2393 /****************************************************************************/
2394 static int
2395 bce_dma_alloc(device_t dev)
2396 {
2397 	struct bce_softc *sc;
2398 	int i, error, rc = 0;
2399 	bus_addr_t busaddr;
2400 	bus_size_t max_size, max_seg_size;
2401 	int max_segments;
2402 
2403 	sc = device_get_softc(dev);
2404 
2405 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2406 
2407 	/*
2408 	 * Allocate the parent bus DMA tag appropriate for PCI.
2409 	 */
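	/*
	 * bus_dma_tag_create() arguments below, in order: parent tag,
	 * alignment, boundary, lowaddr, highaddr, filter, filterarg,
	 * maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg.
	 */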
2410 	if (bus_dma_tag_create(NULL,
2411 			1,
2412 			BCE_DMA_BOUNDARY,
2413 			sc->max_bus_addr,
2414 			BUS_SPACE_MAXADDR,
2415 			NULL, NULL,
2416 			MAXBSIZE,
2417 			BUS_SPACE_UNRESTRICTED,
2418 			BUS_SPACE_MAXSIZE_32BIT,
2419 			0,
2420 			NULL, NULL,
2421 			&sc->parent_tag)) {
2422 		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2423 			__FILE__, __LINE__);
2424 		rc = ENOMEM;
2425 		goto bce_dma_alloc_exit;
2426 	}
2427 
2428 	/*
2429 	 * Create a DMA tag for the status block, allocate and clear the
2430 	 * memory, map the memory into DMA space, and fetch the physical
2431 	 * address of the block.
2432 	 */
2433 	if (bus_dma_tag_create(sc->parent_tag,
2434 	    	BCE_DMA_ALIGN,
2435 	    	BCE_DMA_BOUNDARY,
2436 	    	sc->max_bus_addr,
2437 	    	BUS_SPACE_MAXADDR,
2438 	    	NULL, NULL,
2439 	    	BCE_STATUS_BLK_SZ,
2440 	    	1,
2441 	    	BCE_STATUS_BLK_SZ,
2442 	    	0,
2443 	    	NULL, NULL,
2444 	    	&sc->status_tag)) {
2445 		BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2446 			__FILE__, __LINE__);
2447 		rc = ENOMEM;
2448 		goto bce_dma_alloc_exit;
2449 	}
2450 
2451 	if(bus_dmamem_alloc(sc->status_tag,
2452 	    	(void **)&sc->status_block,
2453 	    	BUS_DMA_NOWAIT,
2454 	    	&sc->status_map)) {
2455 		BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2456 			__FILE__, __LINE__);
2457 		rc = ENOMEM;
2458 		goto bce_dma_alloc_exit;
2459 	}
2460 
2461 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2462 
2463 	error = bus_dmamap_load(sc->status_tag,
2464 	    	sc->status_map,
2465 	    	sc->status_block,
2466 	    	BCE_STATUS_BLK_SZ,
2467 	    	bce_dma_map_addr,
2468 	    	&busaddr,
2469 	    	BUS_DMA_NOWAIT);
2470 
2471 	if (error) {
2472 		BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2473 			__FILE__, __LINE__);
2474 		rc = ENOMEM;
2475 		goto bce_dma_alloc_exit;
2476 	}
2477 
2478 	sc->status_block_paddr = busaddr;
2479 	/* DRC - Fix for 64 bit addresses. */
2480 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2481 		(u32) sc->status_block_paddr);
2482 
2483 	/*
2484 	 * Create a DMA tag for the statistics block, allocate and clear the
2485 	 * memory, map the memory into DMA space, and fetch the physical
2486 	 * address of the block.
2487 	 */
2488 	if (bus_dma_tag_create(sc->parent_tag,
2489 	    	BCE_DMA_ALIGN,
2490 	    	BCE_DMA_BOUNDARY,
2491 	    	sc->max_bus_addr,
2492 	    	BUS_SPACE_MAXADDR,
2493 	    	NULL, NULL,
2494 	    	BCE_STATS_BLK_SZ,
2495 	    	1,
2496 	    	BCE_STATS_BLK_SZ,
2497 	    	0,
2498 	    	NULL, NULL,
2499 	    	&sc->stats_tag)) {
2500 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
2501 			__FILE__, __LINE__);
2502 		rc = ENOMEM;
2503 		goto bce_dma_alloc_exit;
2504 	}
2505 
2506 	if (bus_dmamem_alloc(sc->stats_tag,
2507 	    	(void **)&sc->stats_block,
2508 	    	BUS_DMA_NOWAIT,
2509 	    	&sc->stats_map)) {
2510 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
2511 			__FILE__, __LINE__);
2512 		rc = ENOMEM;
2513 		goto bce_dma_alloc_exit;
2514 	}
2515 
2516 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2517 
2518 	error = bus_dmamap_load(sc->stats_tag,
2519 	    	sc->stats_map,
2520 	    	sc->stats_block,
2521 	    	BCE_STATS_BLK_SZ,
2522 	    	bce_dma_map_addr,
2523 	    	&busaddr,
2524 	    	BUS_DMA_NOWAIT);
2525 
2526 	if(error) {
2527 		BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
2528 			__FILE__, __LINE__);
2529 		rc = ENOMEM;
2530 		goto bce_dma_alloc_exit;
2531 	}
2532 
2533 	sc->stats_block_paddr = busaddr;
2534 	/* DRC - Fix for 64 bit address. */
2535 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2536 		(u32) sc->stats_block_paddr);
2537 
2538 	/*
2539 	 * Create a DMA tag for the TX buffer descriptor chain,
2540 	 * allocate and clear the  memory, and fetch the
2541 	 * physical address of the block.
2542 	 */
2543 	if(bus_dma_tag_create(sc->parent_tag,
2544 			BCM_PAGE_SIZE,
2545 		    BCE_DMA_BOUNDARY,
2546 			sc->max_bus_addr,
2547 			BUS_SPACE_MAXADDR,
2548 			NULL, NULL,
2549 			BCE_TX_CHAIN_PAGE_SZ,
2550 			1,
2551 			BCE_TX_CHAIN_PAGE_SZ,
2552 			0,
2553 			NULL, NULL,
2554 			&sc->tx_bd_chain_tag)) {
2555 		BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2556 			__FILE__, __LINE__);
2557 		rc = ENOMEM;
2558 		goto bce_dma_alloc_exit;
2559 	}
2560 
2561 	for (i = 0; i < TX_PAGES; i++) {
2562 
2563 		if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
2564 	    		(void **)&sc->tx_bd_chain[i],
2565 	    		BUS_DMA_NOWAIT,
2566 		    	&sc->tx_bd_chain_map[i])) {
2567 			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
2568 				"chain DMA memory!\n", __FILE__, __LINE__);
2569 			rc = ENOMEM;
2570 			goto bce_dma_alloc_exit;
2571 		}
2572 
2573 		error = bus_dmamap_load(sc->tx_bd_chain_tag,
2574 	    		sc->tx_bd_chain_map[i],
2575 	    		sc->tx_bd_chain[i],
2576 		    	BCE_TX_CHAIN_PAGE_SZ,
2577 		    	bce_dma_map_addr,
2578 	    		&busaddr,
2579 	    		BUS_DMA_NOWAIT);
2580 
2581 		if (error) {
2582 			BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
2583 				__FILE__, __LINE__);
2584 			rc = ENOMEM;
2585 			goto bce_dma_alloc_exit;
2586 		}
2587 
2588 		sc->tx_bd_chain_paddr[i] = busaddr;
2589 		/* DRC - Fix for 64 bit systems. */
2590 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2591 			i, (u32) sc->tx_bd_chain_paddr[i]);
2592 	}
2593 
2594 	/* Check the required size before mapping to conserve resources. */
2595 	if (bce_tso_enable) {
2596 		max_size     = BCE_TSO_MAX_SIZE;
2597 		max_segments = BCE_MAX_SEGMENTS;
2598 		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
2599 	} else {
2600 		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
2601 		max_segments = BCE_MAX_SEGMENTS;
2602 		max_seg_size = MCLBYTES;
2603 	}
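	/*
	 * The TX mbuf tag created below allows a single mapping to span up
	 * to max_segments segments of at most max_seg_size bytes each,
	 * which is what lets a (possibly TSO sized) frame be split across
	 * multiple TX buffer descriptors.
	 */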
2604 
2605 	/* Create a DMA tag for TX mbufs. */
2606 	if (bus_dma_tag_create(sc->parent_tag,
2607 			1,
2608 			BCE_DMA_BOUNDARY,
2609 			sc->max_bus_addr,
2610 			BUS_SPACE_MAXADDR,
2611 			NULL, NULL,
2612 			max_size,
2613 			max_segments,
2614 			max_seg_size,
2615 			0,
2616 			NULL, NULL,
2617 			&sc->tx_mbuf_tag)) {
2618 		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
2619 			__FILE__, __LINE__);
2620 		rc = ENOMEM;
2621 		goto bce_dma_alloc_exit;
2622 	}
2623 
2624 	/* Create DMA maps for the TX mbufs clusters. */
2625 	for (i = 0; i < TOTAL_TX_BD; i++) {
2626 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2627 			&sc->tx_mbuf_map[i])) {
2628 			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
2629 				__FILE__, __LINE__);
2630 			rc = ENOMEM;
2631 			goto bce_dma_alloc_exit;
2632 		}
2633 	}
2634 
2635 	/*
2636 	 * Create a DMA tag for the RX buffer descriptor chain,
2637 	 * allocate and clear the memory, and fetch the physical
2638 	 * address of the blocks.
2639 	 */
2640 	if (bus_dma_tag_create(sc->parent_tag,
2641 			BCM_PAGE_SIZE,
2642 			BCE_DMA_BOUNDARY,
2643 			sc->max_bus_addr,
2644 			BUS_SPACE_MAXADDR,
2645 			NULL, NULL,
2646 			BCE_RX_CHAIN_PAGE_SZ,
2647 			1,
2648 			BCE_RX_CHAIN_PAGE_SZ,
2649 			0,
2650 			NULL, NULL,
2651 			&sc->rx_bd_chain_tag)) {
2652 		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2653 			__FILE__, __LINE__);
2654 		rc = ENOMEM;
2655 		goto bce_dma_alloc_exit;
2656 	}
2657 
2658 	for (i = 0; i < RX_PAGES; i++) {
2659 
2660 		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
2661 	    		(void **)&sc->rx_bd_chain[i],
2662 	    		BUS_DMA_NOWAIT,
2663 		    	&sc->rx_bd_chain_map[i])) {
2664 			BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
2665 				"DMA memory!\n", __FILE__, __LINE__);
2666 			rc = ENOMEM;
2667 			goto bce_dma_alloc_exit;
2668 		}
2669 
2670 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2671 
2672 		error = bus_dmamap_load(sc->rx_bd_chain_tag,
2673 	    		sc->rx_bd_chain_map[i],
2674 	    		sc->rx_bd_chain[i],
2675 		    	BCE_RX_CHAIN_PAGE_SZ,
2676 		    	bce_dma_map_addr,
2677 	    		&busaddr,
2678 	    		BUS_DMA_NOWAIT);
2679 
2680 		if (error) {
2681 			BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
2682 				__FILE__, __LINE__);
2683 			rc = ENOMEM;
2684 			goto bce_dma_alloc_exit;
2685 		}
2686 
2687 		sc->rx_bd_chain_paddr[i] = busaddr;
2688 		/* DRC - Fix for 64 bit systems. */
2689 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2690 			i, (u32) sc->rx_bd_chain_paddr[i]);
2691 	}
2692 
2693 	/*
2694 	 * Create a DMA tag for RX mbufs.
2695 	 */
2696 	max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
2697 		MCLBYTES : sc->rx_bd_mbuf_alloc_size);
2698 
2699 	if (bus_dma_tag_create(sc->parent_tag,
2700 			1,
2701 			BCE_DMA_BOUNDARY,
2702 			sc->max_bus_addr,
2703 			BUS_SPACE_MAXADDR,
2704 			NULL, NULL,
2705 			max_size,
2706 			1,
2707 			max_seg_size,
2708 			0,
2709 			NULL, NULL,
2710 	    	&sc->rx_mbuf_tag)) {
2711 		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
2712 			__FILE__, __LINE__);
2713 		rc = ENOMEM;
2714 		goto bce_dma_alloc_exit;
2715 	}
2716 
2717 	/* Create DMA maps for the RX mbuf clusters. */
2718 	for (i = 0; i < TOTAL_RX_BD; i++) {
2719 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2720 				&sc->rx_mbuf_map[i])) {
2721 			BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
2722 				__FILE__, __LINE__);
2723 			rc = ENOMEM;
2724 			goto bce_dma_alloc_exit;
2725 		}
2726 	}
2727 
2728 	/*
2729 	 * Create a DMA tag for the page buffer descriptor chain,
2730 	 * allocate and clear the memory, and fetch the physical
2731 	 * address of the blocks.
2732 	 */
2733 	if (bus_dma_tag_create(sc->parent_tag,
2734 			BCM_PAGE_SIZE,
2735 			BCE_DMA_BOUNDARY,
2736 			sc->max_bus_addr,
2737 			BUS_SPACE_MAXADDR,
2738 			NULL, NULL,
2739 			BCE_PG_CHAIN_PAGE_SZ,
2740 			1,
2741 			BCE_PG_CHAIN_PAGE_SZ,
2742 			0,
2743 			NULL, NULL,
2744 			&sc->pg_bd_chain_tag)) {
2745 		BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
2746 			__FILE__, __LINE__);
2747 		rc = ENOMEM;
2748 		goto bce_dma_alloc_exit;
2749 	}
2750 
2751 	for (i = 0; i < PG_PAGES; i++) {
2752 
2753 		if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
2754 	    		(void **)&sc->pg_bd_chain[i],
2755 	    		BUS_DMA_NOWAIT,
2756 		    	&sc->pg_bd_chain_map[i])) {
2757 			BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
2758 				"DMA memory!\n", __FILE__, __LINE__);
2759 			rc = ENOMEM;
2760 			goto bce_dma_alloc_exit;
2761 		}
2762 
2763 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
2764 
2765 		error = bus_dmamap_load(sc->pg_bd_chain_tag,
2766 	    		sc->pg_bd_chain_map[i],
2767 	    		sc->pg_bd_chain[i],
2768 		    	BCE_PG_CHAIN_PAGE_SZ,
2769 		    	bce_dma_map_addr,
2770 	    		&busaddr,
2771 	    		BUS_DMA_NOWAIT);
2772 
2773 		if (error) {
2774 			BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
2775 				__FILE__, __LINE__);
2776 			rc = ENOMEM;
2777 			goto bce_dma_alloc_exit;
2778 		}
2779 
2780 		sc->pg_bd_chain_paddr[i] = busaddr;
2781 		/* DRC - Fix for 64 bit systems. */
2782 		DBPRINT(sc, BCE_INFO, "pg_bd_chain_paddr[%d] = 0x%08X\n",
2783 			i, (u32) sc->pg_bd_chain_paddr[i]);
2784 	}
2785 
2786 	/*
2787 	 * Create a DMA tag for page mbufs.
2788 	 */
2789 	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
2790 		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
2791 
2792 	if (bus_dma_tag_create(sc->parent_tag,
2793 			1,
2794 			BCE_DMA_BOUNDARY,
2795 			sc->max_bus_addr,
2796 			BUS_SPACE_MAXADDR,
2797 			NULL, NULL,
2798 			max_size,
2799 			1,
2800 			max_seg_size,
2801 			0,
2802 			NULL, NULL,
2803 	    	&sc->pg_mbuf_tag)) {
2804 		BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
2805 			__FILE__, __LINE__);
2806 		rc = ENOMEM;
2807 		goto bce_dma_alloc_exit;
2808 	}
2809 
2810 	/* Create DMA maps for the page mbuf clusters. */
2811 	for (i = 0; i < TOTAL_PG_BD; i++) {
2812 		if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
2813 				&sc->pg_mbuf_map[i])) {
2814 			BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
2815 				__FILE__, __LINE__);
2816 			rc = ENOMEM;
2817 			goto bce_dma_alloc_exit;
2818 		}
2819 	}
2820 
2821 bce_dma_alloc_exit:
2822 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2823 
2824 	return(rc);
2825 }
2826 
2827 
2828 /****************************************************************************/
2829 /* Release all resources used by the driver.                                */
2830 /*                                                                          */
2831 /* Releases all resources acquired by the driver including interrupts,      */
2832 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2833 /*                                                                          */
2834 /* Returns:                                                                 */
2835 /*   Nothing.                                                               */
2836 /****************************************************************************/
2837 static void
2838 bce_release_resources(struct bce_softc *sc)
2839 {
2840 	device_t dev;
2841 
2842 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2843 
2844 	dev = sc->bce_dev;
2845 
2846 	bce_dma_free(sc);
2847 
2848 	if (sc->bce_intrhand != NULL) {
2849 		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
2850 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
2851 	}
2852 
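	/* The IRQ resource ID is 1 when MSI is in use and 0 for legacy INTx. */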
2853 	if (sc->bce_res_irq != NULL) {
2854 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
2855 		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2856 			sc->bce_res_irq);
2857 	}
2858 
2859 	if (sc->bce_flags & BCE_USING_MSI_FLAG) {
2860 		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI vector.\n");
2861 		pci_release_msi(dev);
2862 	}
2863 
2864 	if (sc->bce_res_mem != NULL) {
2865 		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
2866 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
2867 	}
2868 
2869 	if (sc->bce_ifp != NULL) {
2870 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
2871 		if_free(sc->bce_ifp);
2872 	}
2873 
2874 	if (mtx_initialized(&sc->bce_mtx))
2875 		BCE_LOCK_DESTROY(sc);
2876 
2877 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2878 
2879 }
2880 
2881 
2882 /****************************************************************************/
2883 /* Firmware synchronization.                                                */
2884 /*                                                                          */
2885 /* Before performing certain events such as a chip reset, synchronize with  */
2886 /* the firmware first.                                                      */
2887 /*                                                                          */
2888 /* Returns:                                                                 */
2889 /*   0 for success, positive value for failure.                             */
2890 /****************************************************************************/
2891 static int
2892 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2893 {
2894 	int i, rc = 0;
2895 	u32 val;
2896 
2897 	/* Don't waste any time if we've timed out before. */
2898 	if (sc->bce_fw_timed_out) {
2899 		rc = EBUSY;
2900 		goto bce_fw_sync_exit;
2901 	}
2902 
2903 	/* Increment the message sequence number. */
2904 	sc->bce_fw_wr_seq++;
2905 	msg_data |= sc->bce_fw_wr_seq;
2906 
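	/*
	 * The low order bits of the mailbox word carry the sequence
	 * number; the bootcode acknowledges by echoing the same sequence
	 * in its own mailbox (BCE_FW_MB), which the loop below polls for.
	 */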
2907 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2908 
2909 	/* Send the message to the bootcode driver mailbox. */
2910 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2911 
2912 	/* Wait for the bootcode to acknowledge the message. */
2913 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2914 		/* Check for a response in the bootcode firmware mailbox. */
2915 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2916 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2917 			break;
2918 		DELAY(1000);
2919 	}
2920 
2921 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2922 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2923 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2924 
2925 		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
2926 			"msg_data = 0x%08X\n",
2927 			__FILE__, __LINE__, msg_data);
2928 
2929 		msg_data &= ~BCE_DRV_MSG_CODE;
2930 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2931 
2932 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2933 
2934 		sc->bce_fw_timed_out = 1;
2935 		rc = EBUSY;
2936 	}
2937 
2938 bce_fw_sync_exit:
2939 	return (rc);
2940 }
2941 
2942 
2943 /****************************************************************************/
2944 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2945 /*                                                                          */
2946 /* Returns:                                                                 */
2947 /*   Nothing.                                                               */
2948 /****************************************************************************/
2949 static void
2950 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2951 	u32 rv2p_code_len, u32 rv2p_proc)
2952 {
2953 	int i;
2954 	u32 val;
2955 
2956 	/* Set the page size used by RV2P. */
2957 	if (rv2p_proc == RV2P_PROC2) {
2958 		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
2959 	}
2960 
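	/*
	 * Each RV2P instruction is 64 bits wide: write the high and low
	 * dwords, then issue an address/command write to commit
	 * instruction (i / 8) to the processor's instruction memory.
	 */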
2961 	for (i = 0; i < rv2p_code_len; i += 8) {
2962 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2963 		rv2p_code++;
2964 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2965 		rv2p_code++;
2966 
2967 		if (rv2p_proc == RV2P_PROC1) {
2968 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2969 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2970 		}
2971 		else {
2972 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2973 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2974 		}
2975 	}
2976 
2977 	/* Reset the processor; un-stalling is done later. */
2978 	if (rv2p_proc == RV2P_PROC1) {
2979 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2980 	}
2981 	else {
2982 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2983 	}
2984 }
2985 
2986 
2987 /****************************************************************************/
2988 /* Load RISC processor firmware.                                            */
2989 /*                                                                          */
2990 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2991 /* associated with a particular processor.                                  */
2992 /*                                                                          */
2993 /* Returns:                                                                 */
2994 /*   Nothing.                                                               */
2995 /****************************************************************************/
2996 static void
2997 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2998 	struct fw_info *fw)
2999 {
3000 	u32 offset;
3001 	u32 val;
3002 
3003 	/* Halt the CPU. */
3004 	val = REG_RD_IND(sc, cpu_reg->mode);
3005 	val |= cpu_reg->mode_value_halt;
3006 	REG_WR_IND(sc, cpu_reg->mode, val);
3007 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3008 
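	/*
	 * Each firmware section is linked at an address in the CPU's own
	 * (MIPS) view of memory; subtracting mips_view_base and adding
	 * spad_base converts that address into a scratchpad offset
	 * reachable through the indirect register interface.
	 */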
3009 	/* Load the Text area. */
3010 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3011 	if (fw->text) {
3012 		int j;
3013 
3014 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3015 			REG_WR_IND(sc, offset, fw->text[j]);
3016 		}
3017 	}
3018 
3019 	/* Load the Data area. */
3020 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3021 	if (fw->data) {
3022 		int j;
3023 
3024 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3025 			REG_WR_IND(sc, offset, fw->data[j]);
3026 		}
3027 	}
3028 
3029 	/* Load the SBSS area. */
3030 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3031 	if (fw->sbss) {
3032 		int j;
3033 
3034 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3035 			REG_WR_IND(sc, offset, fw->sbss[j]);
3036 		}
3037 	}
3038 
3039 	/* Load the BSS area. */
3040 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3041 	if (fw->bss) {
3042 		int j;
3043 
3044 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3045 			REG_WR_IND(sc, offset, fw->bss[j]);
3046 		}
3047 	}
3048 
3049 	/* Load the Read-Only area. */
3050 	offset = cpu_reg->spad_base +
3051 		(fw->rodata_addr - cpu_reg->mips_view_base);
3052 	if (fw->rodata) {
3053 		int j;
3054 
3055 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3056 			REG_WR_IND(sc, offset, fw->rodata[j]);
3057 		}
3058 	}
3059 
3060 	/* Clear the pre-fetch instruction. */
3061 	REG_WR_IND(sc, cpu_reg->inst, 0);
3062 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3063 
3064 	/* Start the CPU. */
3065 	val = REG_RD_IND(sc, cpu_reg->mode);
3066 	val &= ~cpu_reg->mode_value_halt;
3067 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3068 	REG_WR_IND(sc, cpu_reg->mode, val);
3069 }
3070 
3071 
3072 /****************************************************************************/
3073 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
3074 /*                                                                          */
3075 /* Loads the firmware for each CPU and starts the CPU.                      */
3076 /*                                                                          */
3077 /* Returns:                                                                 */
3078 /*   Nothing.                                                               */
3079 /****************************************************************************/
3080 static void
3081 bce_init_cpus(struct bce_softc *sc)
3082 {
3083 	struct cpu_reg cpu_reg;
3084 	struct fw_info fw;
3085 
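	/*
	 * The same cpu_reg/fw_info templates are refilled for each on-chip
	 * processor below; the firmware images themselves come from
	 * if_bcefw.h.
	 */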
3086 	/* Initialize the RV2P processor. */
3087 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
3088 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
3089 
3090 	/* Initialize the RX Processor. */
3091 	cpu_reg.mode = BCE_RXP_CPU_MODE;
3092 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3093 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3094 	cpu_reg.state = BCE_RXP_CPU_STATE;
3095 	cpu_reg.state_value_clear = 0xffffff;
3096 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3097 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3098 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3099 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3100 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3101 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
3102 	cpu_reg.mips_view_base = 0x8000000;
3103 
3104 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
3105 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3106 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
3107 	fw.start_addr = bce_RXP_b06FwStartAddr;
3108 
3109 	fw.text_addr = bce_RXP_b06FwTextAddr;
3110 	fw.text_len = bce_RXP_b06FwTextLen;
3111 	fw.text_index = 0;
3112 	fw.text = bce_RXP_b06FwText;
3113 
3114 	fw.data_addr = bce_RXP_b06FwDataAddr;
3115 	fw.data_len = bce_RXP_b06FwDataLen;
3116 	fw.data_index = 0;
3117 	fw.data = bce_RXP_b06FwData;
3118 
3119 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3120 	fw.sbss_len = bce_RXP_b06FwSbssLen;
3121 	fw.sbss_index = 0;
3122 	fw.sbss = bce_RXP_b06FwSbss;
3123 
3124 	fw.bss_addr = bce_RXP_b06FwBssAddr;
3125 	fw.bss_len = bce_RXP_b06FwBssLen;
3126 	fw.bss_index = 0;
3127 	fw.bss = bce_RXP_b06FwBss;
3128 
3129 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3130 	fw.rodata_len = bce_RXP_b06FwRodataLen;
3131 	fw.rodata_index = 0;
3132 	fw.rodata = bce_RXP_b06FwRodata;
3133 
3134 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3135 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3136 
3137 	/* Initialize the TX Processor. */
3138 	cpu_reg.mode = BCE_TXP_CPU_MODE;
3139 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3140 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3141 	cpu_reg.state = BCE_TXP_CPU_STATE;
3142 	cpu_reg.state_value_clear = 0xffffff;
3143 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3144 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3145 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3146 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3147 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3148 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
3149 	cpu_reg.mips_view_base = 0x8000000;
3150 
3151 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
3152 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3153 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
3154 	fw.start_addr = bce_TXP_b06FwStartAddr;
3155 
3156 	fw.text_addr = bce_TXP_b06FwTextAddr;
3157 	fw.text_len = bce_TXP_b06FwTextLen;
3158 	fw.text_index = 0;
3159 	fw.text = bce_TXP_b06FwText;
3160 
3161 	fw.data_addr = bce_TXP_b06FwDataAddr;
3162 	fw.data_len = bce_TXP_b06FwDataLen;
3163 	fw.data_index = 0;
3164 	fw.data = bce_TXP_b06FwData;
3165 
3166 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3167 	fw.sbss_len = bce_TXP_b06FwSbssLen;
3168 	fw.sbss_index = 0;
3169 	fw.sbss = bce_TXP_b06FwSbss;
3170 
3171 	fw.bss_addr = bce_TXP_b06FwBssAddr;
3172 	fw.bss_len = bce_TXP_b06FwBssLen;
3173 	fw.bss_index = 0;
3174 	fw.bss = bce_TXP_b06FwBss;
3175 
3176 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
3177 	fw.rodata_len = bce_TXP_b06FwRodataLen;
3178 	fw.rodata_index = 0;
3179 	fw.rodata = bce_TXP_b06FwRodata;
3180 
3181 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
3182 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3183 
3184 	/* Initialize the TX Patch-up Processor. */
3185 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
3186 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3187 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3188 	cpu_reg.state = BCE_TPAT_CPU_STATE;
3189 	cpu_reg.state_value_clear = 0xffffff;
3190 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3191 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3192 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3193 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3194 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3195 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3196 	cpu_reg.mips_view_base = 0x8000000;
3197 
3198 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3199 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3200 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3201 	fw.start_addr = bce_TPAT_b06FwStartAddr;
3202 
3203 	fw.text_addr = bce_TPAT_b06FwTextAddr;
3204 	fw.text_len = bce_TPAT_b06FwTextLen;
3205 	fw.text_index = 0;
3206 	fw.text = bce_TPAT_b06FwText;
3207 
3208 	fw.data_addr = bce_TPAT_b06FwDataAddr;
3209 	fw.data_len = bce_TPAT_b06FwDataLen;
3210 	fw.data_index = 0;
3211 	fw.data = bce_TPAT_b06FwData;
3212 
3213 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3214 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
3215 	fw.sbss_index = 0;
3216 	fw.sbss = bce_TPAT_b06FwSbss;
3217 
3218 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
3219 	fw.bss_len = bce_TPAT_b06FwBssLen;
3220 	fw.bss_index = 0;
3221 	fw.bss = bce_TPAT_b06FwBss;
3222 
3223 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3224 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
3225 	fw.rodata_index = 0;
3226 	fw.rodata = bce_TPAT_b06FwRodata;
3227 
3228 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3229 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3230 
3231 	/* Initialize the Completion Processor. */
3232 	cpu_reg.mode = BCE_COM_CPU_MODE;
3233 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3234 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3235 	cpu_reg.state = BCE_COM_CPU_STATE;
3236 	cpu_reg.state_value_clear = 0xffffff;
3237 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3238 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3239 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3240 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3241 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3242 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3243 	cpu_reg.mips_view_base = 0x8000000;
3244 
3245 	fw.ver_major = bce_COM_b06FwReleaseMajor;
3246 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
3247 	fw.ver_fix = bce_COM_b06FwReleaseFix;
3248 	fw.start_addr = bce_COM_b06FwStartAddr;
3249 
3250 	fw.text_addr = bce_COM_b06FwTextAddr;
3251 	fw.text_len = bce_COM_b06FwTextLen;
3252 	fw.text_index = 0;
3253 	fw.text = bce_COM_b06FwText;
3254 
3255 	fw.data_addr = bce_COM_b06FwDataAddr;
3256 	fw.data_len = bce_COM_b06FwDataLen;
3257 	fw.data_index = 0;
3258 	fw.data = bce_COM_b06FwData;
3259 
3260 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
3261 	fw.sbss_len = bce_COM_b06FwSbssLen;
3262 	fw.sbss_index = 0;
3263 	fw.sbss = bce_COM_b06FwSbss;
3264 
3265 	fw.bss_addr = bce_COM_b06FwBssAddr;
3266 	fw.bss_len = bce_COM_b06FwBssLen;
3267 	fw.bss_index = 0;
3268 	fw.bss = bce_COM_b06FwBss;
3269 
3270 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
3271 	fw.rodata_len = bce_COM_b06FwRodataLen;
3272 	fw.rodata_index = 0;
3273 	fw.rodata = bce_COM_b06FwRodata;
3274 
3275 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3276 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3277 
3278 	/* Initialize the Command Processor. */
3279 	cpu_reg.mode = BCE_CP_CPU_MODE;
3280 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3281 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3282 	cpu_reg.state = BCE_CP_CPU_STATE;
3283 	cpu_reg.state_value_clear = 0xffffff;
3284 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3285 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3286 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3287 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3288 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3289 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3290 	cpu_reg.mips_view_base = 0x8000000;
3291 
3292 	fw.ver_major = bce_CP_b06FwReleaseMajor;
3293 	fw.ver_minor = bce_CP_b06FwReleaseMinor;
3294 	fw.ver_fix = bce_CP_b06FwReleaseFix;
3295 	fw.start_addr = bce_CP_b06FwStartAddr;
3296 
3297 	fw.text_addr = bce_CP_b06FwTextAddr;
3298 	fw.text_len = bce_CP_b06FwTextLen;
3299 	fw.text_index = 0;
3300 	fw.text = bce_CP_b06FwText;
3301 
3302 	fw.data_addr = bce_CP_b06FwDataAddr;
3303 	fw.data_len = bce_CP_b06FwDataLen;
3304 	fw.data_index = 0;
3305 	fw.data = bce_CP_b06FwData;
3306 
3307 	fw.sbss_addr = bce_CP_b06FwSbssAddr;
3308 	fw.sbss_len = bce_CP_b06FwSbssLen;
3309 	fw.sbss_index = 0;
3310 	fw.sbss = bce_CP_b06FwSbss;
3311 
3312 	fw.bss_addr = bce_CP_b06FwBssAddr;
3313 	fw.bss_len = bce_CP_b06FwBssLen;
3314 	fw.bss_index = 0;
3315 	fw.bss = bce_CP_b06FwBss;
3316 
3317 	fw.rodata_addr = bce_CP_b06FwRodataAddr;
3318 	fw.rodata_len = bce_CP_b06FwRodataLen;
3319 	fw.rodata_index = 0;
3320 	fw.rodata = bce_CP_b06FwRodata;
3321 
3322 	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
3323 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3324 }
3325 
3326 
3327 /****************************************************************************/
3328 /* Initialize context memory.                                               */
3329 /*                                                                          */
3330 /* Clears the memory associated with each Context ID (CID).                 */
3331 /*                                                                          */
3332 /* Returns:                                                                 */
3333 /*   Nothing.                                                               */
3334 /****************************************************************************/
3335 static void
3336 bce_init_ctx(struct bce_softc *sc)
3337 {
3338 	u32 vcid = 96;
3339 
3340 	while (vcid) {
3341 		u32 vcid_addr, pcid_addr, offset;
3342 		int i;
3343 
3344 		vcid--;
3345 
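		/* The virtual and physical context addresses map 1:1. */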
3346 		vcid_addr = GET_CID_ADDR(vcid);
3347 		pcid_addr = vcid_addr;
3348 
3349 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
3350 			vcid_addr += (i << PHY_CTX_SHIFT);
3351 			pcid_addr += (i << PHY_CTX_SHIFT);
3352 
3353 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3354 			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3355 
3356 			/* Zero out the context. */
3357 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3358 				CTX_WR(sc, vcid_addr, offset, 0);
3359 		}
3360 	}
3361 }
3362 
3363 
3364 /****************************************************************************/
3365 /* Fetch the permanent MAC address of the controller.                       */
3366 /*                                                                          */
3367 /* Returns:                                                                 */
3368 /*   Nothing.                                                               */
3369 /****************************************************************************/
3370 static void
3371 bce_get_mac_addr(struct bce_softc *sc)
3372 {
3373 	u32 mac_lo = 0, mac_hi = 0;
3374 
3375 	/*
3376 	 * The NetXtreme II bootcode populates various NIC
3377 	 * power-on and runtime configuration items in a
3378 	 * shared memory area.  The factory configured MAC
3379 	 * address is available from both NVRAM and the
3380 	 * shared memory area so we'll read the value from
3381 	 * shared memory for speed.
3382 	 */
3383 
3384 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3385 		BCE_PORT_HW_CFG_MAC_UPPER);
3386 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3387 		BCE_PORT_HW_CFG_MAC_LOWER);
3388 
3389 	if ((mac_lo == 0) && (mac_hi == 0)) {
3390 		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
3391 			__FILE__, __LINE__);
3392 	} else {
3393 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3394 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3395 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3396 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3397 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3398 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3399 	}
3400 
3401 	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3402 }
3403 
3404 
3405 /****************************************************************************/
3406 /* Program the MAC address.                                                 */
3407 /*                                                                          */
3408 /* Returns:                                                                 */
3409 /*   Nothing.                                                               */
3410 /****************************************************************************/
3411 static void
3412 bce_set_mac_addr(struct bce_softc *sc)
3413 {
3414 	u32 val;
3415 	u8 *mac_addr = sc->eaddr;
3416 
3417 	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3418 
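	/*
	 * The address is split across two registers: the two most
	 * significant bytes are written to BCE_EMAC_MAC_MATCH0 and the
	 * remaining four bytes to BCE_EMAC_MAC_MATCH1.
	 */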
3419 	val = (mac_addr[0] << 8) | mac_addr[1];
3420 
3421 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3422 
3423 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3424 		(mac_addr[4] << 8) | mac_addr[5];
3425 
3426 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3427 }
3428 
3429 
3430 /****************************************************************************/
3431 /* Stop the controller.                                                     */
3432 /*                                                                          */
3433 /* Returns:                                                                 */
3434 /*   Nothing.                                                               */
3435 /****************************************************************************/
3436 static void
3437 bce_stop(struct bce_softc *sc)
3438 {
3439 	struct ifnet *ifp;
3440 	struct ifmedia_entry *ifm;
3441 	struct mii_data *mii = NULL;
3442 	int mtmp, itmp;
3443 
3444 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3445 
3446 	BCE_LOCK_ASSERT(sc);
3447 
3448 	ifp = sc->bce_ifp;
3449 
3450 	mii = device_get_softc(sc->bce_miibus);
3451 
3452 	callout_stop(&sc->bce_tick_callout);
3453 
3454 	/* Disable the transmit/receive blocks. */
3455 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
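	/* Read back so the disable is posted to the chip before the delay. */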
3456 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3457 	DELAY(20);
3458 
3459 	bce_disable_intr(sc);
3460 
3461 	/* Free RX buffers. */
3462 	bce_free_pg_chain(sc);
3463 	bce_free_rx_chain(sc);
3464 
3465 	/* Free TX buffers. */
3466 	bce_free_tx_chain(sc);
3467 
3468 	/*
3469 	 * Isolate/power down the PHY, but leave the media selection
3470 	 * unchanged so that things will be put back to normal when
3471 	 * we bring the interface back up.
3472 	 */
3473 
3474 	itmp = ifp->if_flags;
3475 	ifp->if_flags |= IFF_UP;
3476 
3477 	/* If we are called from bce_detach(), mii is already NULL. */
3478 	if (mii != NULL) {
3479 		ifm = mii->mii_media.ifm_cur;
3480 		mtmp = ifm->ifm_media;
3481 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
3482 		mii_mediachg(mii);
3483 		ifm->ifm_media = mtmp;
3484 	}
3485 
3486 	ifp->if_flags = itmp;
3487 	sc->watchdog_timer = 0;
3488 
3489 	sc->bce_link = 0;
3490 
3491 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3492 
3493 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3494 }
3495 
3496 
3497 static int
3498 bce_reset(struct bce_softc *sc, u32 reset_code)
3499 {
3500 	u32 val;
3501 	int i, rc = 0;
3502 
3503 	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
3504 		__FUNCTION__, reset_code);
3505 
3506 	/* Wait for pending PCI transactions to complete. */
3507 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3508 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3509 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3510 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3511 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3512 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3513 	DELAY(5);
3514 
3515 	/* Assume bootcode is running. */
3516 	sc->bce_fw_timed_out = 0;
3517 
3518 	/* Give the firmware a chance to prepare for the reset. */
3519 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3520 	if (rc)
3521 		goto bce_reset_exit;
3522 
3523 	/* Set a firmware reminder that this is a soft reset. */
3524 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3525 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
3526 
3527 	/* Dummy read to force the chip to complete all current transactions. */
3528 	val = REG_RD(sc, BCE_MISC_ID);
3529 
3530 	/* Chip reset. */
3531 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3532 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3533 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3534 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3535 
3536 	/* Reset normally completes within 30us; poll for up to 100us. */
3537 	for (i = 0; i < 10; i++) {
3538 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3539 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3540 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3541 			break;
3542 		}
3543 		DELAY(10);
3544 	}
3545 
3546 	/* Check that reset completed successfully. */
3547 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3548 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3549 		BCE_PRINTF("%s(%d): Reset failed!\n",
3550 			__FILE__, __LINE__);
3551 		rc = EBUSY;
3552 		goto bce_reset_exit;
3553 	}
3554 
3555 	/* Make sure byte swapping is properly configured. */
3556 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3557 	if (val != 0x01020304) {
3558 		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
3559 			__FILE__, __LINE__);
3560 		rc = ENODEV;
3561 		goto bce_reset_exit;
3562 	}
3563 
3564 	/* Just completed a reset, assume that firmware is running again. */
3565 	sc->bce_fw_timed_out = 0;
3566 
3567 	/* Wait for the firmware to finish its initialization. */
3568 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3569 	if (rc)
3570 		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
3571 			__FILE__, __LINE__);
3572 
3573 bce_reset_exit:
3574 	return (rc);
3575 }
3576 
3577 
3578 static int
3579 bce_chipinit(struct bce_softc *sc)
3580 {
3581 	u32 val;
3582 	int rc = 0;
3583 
3584 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3585 
3586 	/* Make sure the interrupt is not active. */
3587 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3588 
3589 	/*
3590 	 * Initialize DMA byte/word swapping, configure the number of DMA
3591 	 * channels and PCI clock compensation delay.
3592 	 */
3593 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3594 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3595 #if BYTE_ORDER == BIG_ENDIAN
3596 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3597 #endif
3598 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3599 	      DMA_READ_CHANS << 12 |
3600 	      DMA_WRITE_CHANS << 16;
3601 
3602 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3603 
3604 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3605 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3606 
3607 	/*
3608 	 * This setting resolves a problem observed on certain Intel PCI
3609 	 * chipsets that cannot handle multiple outstanding DMA operations.
3610 	 * See errata E9_5706A1_65.
3611 	 */
3612 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3613 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3614 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3615 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3616 
3617 	REG_WR(sc, BCE_DMA_CONFIG, val);
3618 
3619 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3620 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3621 		u16 val;
3622 
3623 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3624 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3625 	}
3626 
3627 	/* Enable the RX_V2P and Context state machines before access. */
3628 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3629 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3630 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3631 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3632 
3633 	/* Initialize context mapping and zero out the quick contexts. */
3634 	bce_init_ctx(sc);
3635 
3636 	/* Initialize the on-board CPUs. */
3637 	bce_init_cpus(sc);
3638 
3639 	/* Prepare NVRAM for access. */
3640 	if (bce_init_nvram(sc)) {
3641 		rc = ENODEV;
3642 		goto bce_chipinit_exit;
3643 	}
3644 
3645 	/* Set the kernel bypass block size */
3646 	val = REG_RD(sc, BCE_MQ_CONFIG);
3647 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3648 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3649 	REG_WR(sc, BCE_MQ_CONFIG, val);
3650 
3651 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3652 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3653 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3654 
3655 	/* Set the page size and clear the RV2P processor stall bits. */
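	/*
	 * For example, assuming 4KB pages (BCM_PAGE_BITS == 12), the
	 * page size field written below is 12 - 8 = 4.
	 */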
3656 	val = (BCM_PAGE_BITS - 8) << 24;
3657 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3658 
3659 	/* Configure page size. */
3660 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3661 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3662 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3663 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3664 
3665 bce_chipinit_exit:
3666 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3667 
3668 	return(rc);
3669 }
3670 
3671 
3672 /****************************************************************************/
3673 /* Initialize the controller in preparation to send/receive traffic.        */
3674 /*                                                                          */
3675 /* Returns:                                                                 */
3676 /*   0 for success, positive value for failure.                             */
3677 /****************************************************************************/
3678 static int
3679 bce_blockinit(struct bce_softc *sc)
3680 {
3681 	u32 reg, val;
3682 	int rc = 0;
3683 
3684 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3685 
3686 	/* Load the hardware default MAC address. */
3687 	bce_set_mac_addr(sc);
3688 
3689 	/* Set the Ethernet backoff seed value */
3690 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3691 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3692 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3693 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3694 
3695 	sc->last_status_idx = 0;
3696 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3697 
3698 	/* Set up link change interrupt generation. */
3699 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3700 
3701 	/* Program the physical address of the status block. */
3702 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3703 		BCE_ADDR_LO(sc->status_block_paddr));
3704 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3705 		BCE_ADDR_HI(sc->status_block_paddr));
3706 
3707 	/* Program the physical address of the statistics block. */
3708 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3709 		BCE_ADDR_LO(sc->stats_block_paddr));
3710 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3711 		BCE_ADDR_HI(sc->stats_block_paddr));
3712 
3713 	/* Program various host coalescing parameters. */
3714 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3715 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3716 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3717 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3718 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3719 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3720 	REG_WR(sc, BCE_HC_TX_TICKS,
3721 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3722 	REG_WR(sc, BCE_HC_RX_TICKS,
3723 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3724 	REG_WR(sc, BCE_HC_COM_TICKS,
3725 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3726 	REG_WR(sc, BCE_HC_CMD_TICKS,
3727 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3728 	REG_WR(sc, BCE_HC_STATS_TICKS,
3729 		(sc->bce_stats_ticks & 0xffff00));
3730 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3731 		0xbb8);  /* 3ms */
3732 	REG_WR(sc, BCE_HC_CONFIG,
3733 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3734 		BCE_HC_CONFIG_COLLECT_STATS));
3735 
3736 	/* Clear the internal statistics counters. */
3737 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3738 
3739 	/* Verify that bootcode is running. */
3740 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3741 
3742 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3743 		BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
3744 			__FILE__, __LINE__);
3745 		reg = 0);
3746 
3747 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3748 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3749 		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
3750 			"Expected: 0x%08X\n", __FILE__, __LINE__,
3751 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3752 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3753 		rc = ENODEV;
3754 		goto bce_blockinit_exit;
3755 	}
3756 
3757 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3758 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3759 
3760 	/* Enable link state change interrupt generation. */
3761 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3762 
3763 	/* Enable all remaining blocks in the MAC. */
3764 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3765 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3766 	DELAY(20);
3767 
3768 bce_blockinit_exit:
3769 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3770 
3771 	return (rc);
3772 }
3773 
3774 
3775 /****************************************************************************/
3776 /* Encapsulate an mbuf into the rx_bd chain.                                */
3777 /*                                                                          */
3778 /* Returns:                                                                 */
3779 /*   0 for success, positive value for failure.                             */
3780 /****************************************************************************/
3781 static int
3782 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
3783 	u16 *chain_prod, u32 *prod_bseq)
3784 {
3785 	bus_dmamap_t map;
3786 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
3787 	struct mbuf *m_new = NULL;
3788 	struct rx_bd *rxbd;
3789 	int nsegs, error, rc = 0;
3790 #ifdef BCE_DEBUG
3791 	u16 debug_chain_prod = *chain_prod;
3792 #endif
3793 
3794 	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Entering %s()\n",
3795 		__FUNCTION__);
3796 
3797 	/* Make sure the inputs are valid. */
3798 	DBRUNIF((*chain_prod > MAX_RX_BD),
3799 		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3800 		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3801 
3802 	DBPRINT(sc, BCE_VERBOSE, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3803 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3804 
3805 	/* Update some debug statistic counters */
3806 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3807 		sc->rx_low_watermark = sc->free_rx_bd);
3808 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3809 
3810 	/* Check whether this is a new mbuf allocation. */
3811 	if (m == NULL) {
3812 
3813 		/* Simulate an mbuf allocation failure. */
3814 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3815 			sc->mbuf_alloc_failed++;
3816 			sc->debug_mbuf_sim_alloc_failed++;
3817 			rc = ENOBUFS;
3818 			goto bce_get_rx_buf_exit);
3819 
3820 		/* This is a new mbuf allocation. */
3821 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3822 		if (m_new == NULL) {
3823 			sc->mbuf_alloc_failed++;
3824 			rc = ENOBUFS;
3825 			goto bce_get_rx_buf_exit;
3826 		}
3827 
3828 		DBRUN(sc->debug_rx_mbuf_alloc++);
3829 	} else {
3830 		/* Reuse an existing mbuf. */
3831 		m_new = m;
3832 	}
3833 
3834 	M_ASSERTPKTHDR(m_new);
3835 
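	/*
	 * Advertise the full buffer size to the controller; the actual
	 * frame length is fixed up in bce_rx_intr() when a frame arrives.
	 */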
3836 	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
3837 
3838 	/* ToDo: Consider calling m_fragment() to test error handling. */
3839 
3840 	/* Map the mbuf cluster into device memory. */
3841 	map = sc->rx_mbuf_map[*chain_prod];
3842 	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3843 	    segs, &nsegs, BUS_DMA_NOWAIT);
3844 
3845 	/* Handle any mapping errors. */
3846 	if (error) {
3847 		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain!\n",
3848 			__FILE__, __LINE__);
3849 
3850 		m_freem(m_new);
3851 		DBRUN(sc->debug_rx_mbuf_alloc--);
3852 
3853 		rc = ENOBUFS;
3854 		goto bce_get_rx_buf_exit;
3855 	}
3856 
3857 	/* All mbufs must map to a single segment. */
3858 	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
3859 		 __FUNCTION__, nsegs));
3860 
3861 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
3862 
3863 	/* Setup the rx_bd for the segment. */
3864 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3865 
3866 	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3867 	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3868 	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
3869 	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
3870 	*prod_bseq += segs[0].ds_len;
3871 
3872 	/* Save the mbuf and update our counter. */
3873 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3874 	sc->free_rx_bd -= nsegs;
3875 
3876 	DBRUNMSG(BCE_EXCESSIVE, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
3877 		nsegs));
3878 
3879 	DBPRINT(sc, BCE_VERBOSE, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3880 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3881 
3882 bce_get_rx_buf_exit:
3883 	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Exiting %s()\n",
3884 		__FUNCTION__);
3885 
3886 	return(rc);
3887 }
3888 
3889 
3890 /****************************************************************************/
3891 /* Encapsulate an mbuf cluster into the page chain.                         */
3892 /*                                                                          */
3893 /* Returns:                                                                 */
3894 /*   0 for success, positive value for failure.                             */
3895 /****************************************************************************/
3896 static int
3897 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
3898 	u16 *prod_idx)
3899 {
3900 	bus_dmamap_t map;
3901 	bus_addr_t busaddr;
3902 	struct mbuf *m_new = NULL;
3903 	struct rx_bd *pgbd;
3904 	int error, rc = 0;
3905 #ifdef BCE_DEBUG
3906 	u16 debug_prod_idx = *prod_idx;
3907 #endif
3908 
3909 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
3910 		__FUNCTION__);
3911 
3912 	/* Make sure the inputs are valid. */
3913 	DBRUNIF((*prod_idx > MAX_PG_BD),
3914 		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
3915 		__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
3916 
3917 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, "
3918 		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
3919 
3920 	/* Update counters if we've hit a new low or run out of pages. */
3921 	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
3922 		sc->pg_low_watermark = sc->free_pg_bd);
3923 	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
3924 
3925 	/* Check whether this is a new mbuf allocation. */
3926 	if (m == NULL) {
3927 
3928 		/* Simulate an mbuf allocation failure. */
3929 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3930 			sc->mbuf_alloc_failed++;
3931 			sc->debug_mbuf_sim_alloc_failed++;
3932 			rc = ENOBUFS;
3933 			goto bce_get_pg_buf_exit);
3934 
3935 		/* This is a new mbuf allocation. */
3936 		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
3937 		if (m_new == NULL) {
3938 			sc->mbuf_alloc_failed++;
3939 			rc = ENOBUFS;
3940 			goto bce_get_pg_buf_exit;
3941 		}
3942 
3943 		DBRUN(sc->debug_pg_mbuf_alloc++);
3944 	} else {
3945 		/* Reuse an existing mbuf. */
3946 		m_new = m;
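		/* Point the data area back to the start of the cluster. */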
3947 		m_new->m_data = m_new->m_ext.ext_buf;
3948 	}
3949 
3950 	m_new->m_len = sc->pg_bd_mbuf_alloc_size;
3951 
3952 	/* ToDo: Consider calling m_fragment() to test error handling. */
3953 
3954 	/* Map the mbuf cluster into device memory. */
3955 	map = sc->pg_mbuf_map[*prod_idx];
3956 	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
3957 	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
3958 
3959 	/* Handle any mapping errors. */
3960 	if (error) {
3961 		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
3962 			__FILE__, __LINE__);
3963 
3964 		m_freem(m_new);
3965 		DBRUN(sc->debug_pg_mbuf_alloc--);
3966 
3967 		rc = ENOBUFS;
3968 		goto bce_get_pg_buf_exit;
3969 	}
3970 
3971 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
3972 
3973 	/*
3974 	 * The page chain uses the same rx_bd data structure
3975 	 * as the receive chain but doesn't require a byte sequence (bseq).
3976 	 */
3977 	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
3978 
3979 	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(busaddr));
3980 	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(busaddr));
3981 	pgbd->rx_bd_len       = htole32(sc->pg_bd_mbuf_alloc_size);
3982 	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
3983 
3984 	/* Save the mbuf and update our counter. */
3985 	sc->pg_mbuf_ptr[*prod_idx] = m_new;
3986 	sc->free_pg_bd--;
3987 
3988 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
3989 		1));
3990 
3991 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, "
3992 		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
3993 
3994 bce_get_pg_buf_exit:
3995 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
3996 		__FUNCTION__);
3997 
3998 	return(rc);
3999 }
4000 
4001 
4002 /****************************************************************************/
4003 /* Allocate memory and initialize the TX data structures.                   */
4004 /*                                                                          */
4005 /* Returns:                                                                 */
4006 /*   0 for success, positive value for failure.                             */
4007 /****************************************************************************/
4008 static int
4009 bce_init_tx_chain(struct bce_softc *sc)
4010 {
4011 	struct tx_bd *txbd;
4012 	u32 val;
4013 	int i, rc = 0;
4014 
4015 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4016 
4017 	/* Set the initial TX producer/consumer indices. */
4018 	sc->tx_prod        = 0;
4019 	sc->tx_cons        = 0;
4020 	sc->tx_prod_bseq   = 0;
4021 	sc->used_tx_bd     = 0;
4022 	sc->max_tx_bd      = USABLE_TX_BD;
4023 	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
4024 	DBRUN(sc->tx_full_count = 0);
4025 
4026 	/*
4027 	 * The NetXtreme II supports a linked-list structure called
4028 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
4029 	 * consists of a series of 1 or more chain pages, each of which
4030 	 * consists of a fixed number of BD entries.
4031 	 * The last BD entry on each page is a pointer to the next page
4032 	 * in the chain, and the last pointer in the BD chain
4033 	 * points back to the beginning of the chain.
4034 	 */
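	/*
	 * A rough sketch of the resulting layout:
	 *
	 *   page 0: [ bd 0 ... bd N-1 ][ next -> page 1 ]
	 *   page 1: [ bd 0 ... bd N-1 ][ next -> page 2 ]
	 *     ...
	 *   last:   [ bd 0 ... bd N-1 ][ next -> page 0 ]
	 */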
4035 
4036 	/* Set the TX next pointer chain entries. */
4037 	for (i = 0; i < TX_PAGES; i++) {
4038 		int j;
4039 
4040 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4041 
4042 		/* Check if we've reached the last page. */
4043 		if (i == (TX_PAGES - 1))
4044 			j = 0;
4045 		else
4046 			j = i + 1;
4047 
4048 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
4049 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
4050 	}
4051 
4052 	/* Initialize the context ID for an L2 TX chain. */
4053 	val = BCE_L2CTX_TYPE_TYPE_L2;
4054 	val |= BCE_L2CTX_TYPE_SIZE_L2;
4055 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
4056 
4057 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4058 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
4059 
4060 	/* Point the hardware to the first page in the chain. */
4061 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
4062 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
4063 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
4064 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
4065 
4066 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
4067 
4068 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4069 
4070 	return(rc);
4071 }
4072 
4073 
4074 /****************************************************************************/
4075 /* Free memory and clear the TX data structures.                            */
4076 /*                                                                          */
4077 /* Returns:                                                                 */
4078 /*   Nothing.                                                               */
4079 /****************************************************************************/
4080 static void
4081 bce_free_tx_chain(struct bce_softc *sc)
4082 {
4083 	int i;
4084 
4085 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4086 
4087 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4088 	for (i = 0; i < TOTAL_TX_BD; i++) {
4089 		if (sc->tx_mbuf_ptr[i] != NULL) {
4090 			if (sc->tx_mbuf_map != NULL)
4091 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
4092 					BUS_DMASYNC_POSTWRITE);
4093 			m_freem(sc->tx_mbuf_ptr[i]);
4094 			sc->tx_mbuf_ptr[i] = NULL;
4095 			DBRUN(sc->debug_tx_mbuf_alloc--);
4096 		}
4097 	}
4098 
4099 	/* Clear each TX chain page. */
4100 	for (i = 0; i < TX_PAGES; i++)
4101 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4102 
4103 	sc->used_tx_bd     = 0;
4104 
4105 	/* Check if we lost any mbufs in the process. */
4106 	DBRUNIF((sc->debug_tx_mbuf_alloc),
4107 		BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
4108 			"from tx chain!\n",
4109 			__FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
4110 
4111 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4112 }
4113 
4114 
4115 /****************************************************************************/
4116 /* Allocate memory and initialize the RX data structures.                   */
4117 /*                                                                          */
4118 /* Returns:                                                                 */
4119 /*   0 for success, positive value for failure.                             */
4120 /****************************************************************************/
4121 static int
4122 bce_init_rx_chain(struct bce_softc *sc)
4123 {
4124 	struct rx_bd *rxbd;
4125 	int i, rc = 0;
4126 	u32 val;
4127 
4128 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4129 
4130 	/* Initialize the RX producer and consumer indices. */
4131 	sc->rx_prod        = 0;
4132 	sc->rx_cons        = 0;
4133 	sc->rx_prod_bseq   = 0;
4134 	sc->free_rx_bd     = USABLE_RX_BD;
4135 	sc->max_rx_bd      = USABLE_RX_BD;
4136 	DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
4137 	DBRUN(sc->rx_empty_count = 0);
4138 
4139 	/* Initialize the RX next pointer chain entries. */
4140 	for (i = 0; i < RX_PAGES; i++) {
4141 		int j;
4142 
4143 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4144 
4145 		/* Check if we've reached the last page. */
4146 		if (i == (RX_PAGES - 1))
4147 			j = 0;
4148 		else
4149 			j = i + 1;
4150 
4151 		/* Setup the chain page pointers. */
4152 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
4153 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
4154 	}
4155 
4156 	/* Initialize the context ID for an L2 RX chain. */
4157 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4158 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
4159 	val |= 0x02 << 8;
4160 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
4161 
4162 	/* Point the hardware to the first page in the chain. */
4163 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
4164 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
4165 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
4166 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
4167 
4168 	/* Fill up the RX chain. */
4169 	bce_fill_rx_chain(sc);
4170 
4171 	for (i = 0; i < RX_PAGES; i++) {
4172 		bus_dmamap_sync(
4173 		    sc->rx_bd_chain_tag,
4174 		    sc->rx_bd_chain_map[i],
4175 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4176 	}
4177 
4178 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4179 
4180 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4181 
4182 	return(rc);
4183 }
4184 
4185 
4186 /****************************************************************************/
4187 /* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
4188 /* occurs.                                                                  */
4189 /*                                                                          */
4190 /* Returns:                                                                 */
4191 /*   Nothing                                                                */
4192 /****************************************************************************/
4193 static void
4194 bce_fill_rx_chain(struct bce_softc *sc)
4195 {
4196 	u16 prod, prod_idx;
4197 	u32 prod_bseq;
4198 
4199 	DBPRINT(sc, BCE_VERBOSE_RECV, "Entering %s()\n", __FUNCTION__);
4200 
4201 	prod      = sc->rx_prod;
4202 	prod_bseq = sc->rx_prod_bseq;
4203 
4204 	/* Keep filling the RX chain until it's full. */
4205 	while (sc->free_rx_bd > 0) {
4206 		prod_idx = RX_CHAIN_IDX(prod);
4207 		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
4208 			/* Bail out if we can't add an mbuf to the chain. */
4209 			break;
4210 		}
4211 		prod = NEXT_RX_BD(prod);
4212 	}
4213 
4214 	/* Save the RX chain producer index. */
4215 	sc->rx_prod      = prod;
4216 	sc->rx_prod_bseq = prod_bseq;
4217 
4218 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
4219 		BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
4220 		__FUNCTION__, sc->rx_prod));
4221 
4222 	/* Tell the chip about the waiting rx_bd's. */
4223 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4224 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4225 
4226 	DBPRINT(sc, BCE_VERBOSE_RECV, "Exiting %s()\n", __FUNCTION__);
4227 }
4228 
4229 
4230 /****************************************************************************/
4231 /* Free memory and clear the RX data structures.                            */
4232 /*                                                                          */
4233 /* Returns:                                                                 */
4234 /*   Nothing.                                                               */
4235 /****************************************************************************/
4236 static void
4237 bce_free_rx_chain(struct bce_softc *sc)
4238 {
4239 	int i;
4240 
4241 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4242 
4243 	/* Clear the jumbo page chain support. */
4244 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, 0);
4245 
4246 	/* Free any mbufs still in the RX mbuf chain. */
4247 	for (i = 0; i < TOTAL_RX_BD; i++) {
4248 		if (sc->rx_mbuf_ptr[i] != NULL) {
4249 			if (sc->rx_mbuf_map[i] != NULL)
4250 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
4251 					BUS_DMASYNC_POSTREAD);
4252 			m_freem(sc->rx_mbuf_ptr[i]);
4253 			sc->rx_mbuf_ptr[i] = NULL;
4254 			DBRUN(sc->debug_rx_mbuf_alloc--);
4255 		}
4256 	}
4257 
4258 	/* Clear each RX chain page. */
4259 	for (i = 0; i < RX_PAGES; i++)
4260 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4261 
4262 	sc->free_rx_bd = sc->max_rx_bd;
4263 
4264 	/* Check if we lost any mbufs in the process. */
4265 	DBRUNIF((sc->debug_rx_mbuf_alloc),
4266 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
4267 			__FUNCTION__, sc->debug_rx_mbuf_alloc));
4268 
4269 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4270 }
4271 
4272 
4273 /****************************************************************************/
4274 /* Allocate memory and initialize the page data structures.                 */
4275 /* Assumes that bce_init_rx_chain() has not already been called.            */
4276 /*                                                                          */
4277 /* Returns:                                                                 */
4278 /*   0 for success, positive value for failure.                             */
4279 /****************************************************************************/
4280 static int
4281 bce_init_pg_chain(struct bce_softc *sc)
4282 {
4283 	struct rx_bd *pgbd;
4284 	int i, rc = 0;
4285 	u32 val;
4286 
4287 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4288 
4289 	/* Initialize the page producer and consumer indices. */
4290 	sc->pg_prod        = 0;
4291 	sc->pg_cons        = 0;
4292 	sc->free_pg_bd     = USABLE_PG_BD;
4293 	sc->max_pg_bd      = USABLE_PG_BD;
4294 	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
4295 	DBRUN(sc->pg_empty_count = 0);
4296 
4297 	/* Initialize the page next pointer chain entries. */
4298 	for (i = 0; i < PG_PAGES; i++) {
4299 		int j;
4300 
4301 		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
4302 
4303 		/* Check if we've reached the last page. */
4304 		if (i == (PG_PAGES - 1))
4305 			j = 0;
4306 		else
4307 			j = i + 1;
4308 
4309 		/* Setup the chain page pointers. */
4310 		pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
4311 		pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
4312 	}
4313 
4314 	/* Point the hardware to the first page in the page chain. */
4315 	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
4316 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_HI, val);
4317 	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
4318 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_LO, val);
4319 
4320 	/* Configure the rx_bd and page chain mbuf cluster size. */
4321 	val = (sc->rx_bd_mbuf_alloc_size << 16) | sc->pg_bd_mbuf_alloc_size;
4322 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, val);
4323 
4324 	/* Configure the context reserved for jumbo support. */
4325 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RBDC_KEY,
4326 		BCE_L2CTX_RBDC_JUMBO_KEY);
4327 
4328 	/* Fill up the page chain. */
4329 	bce_fill_pg_chain(sc);
4330 
4331 	for (i = 0; i < PG_PAGES; i++) {
4332 		bus_dmamap_sync(
4333 		    sc->pg_bd_chain_tag,
4334 		    sc->pg_bd_chain_map[i],
4335 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4336 	}
4337 
4338 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
4339 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4340 
4341 	return(rc);
4342 }
4343 
4344 /****************************************************************************/
4345 /* Add mbufs to the page chain until it's full or an mbuf allocation error  */
4346 /* occurs.                                                                  */
4347 /*                                                                          */
4348 /* Returns:                                                                 */
4349 /*   Nothing                                                                */
4350 /****************************************************************************/
4351 static void
4352 bce_fill_pg_chain(struct bce_softc *sc)
4353 {
4354 	u16 prod, prod_idx;
4355 
4356 	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
4357 
4358 	prod = sc->pg_prod;
4359 
4360 	/* Keep filling the page chain until it's full. */
4361 	while (sc->free_pg_bd > 0) {
4362 		prod_idx = PG_CHAIN_IDX(prod);
4363 		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
4364 			/* Bail out if we can't add an mbuf to the chain. */
4365 			break;
4366 		}
4367 		prod = NEXT_PG_BD(prod);
4368 	}
4369 
4370 	/* Save the page chain producer index. */
4371 	sc->pg_prod = prod;
4372 
4373 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
4374 		BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
4375 		__FUNCTION__, sc->pg_prod));
4376 
4377 	/* Tell the chip about the new rx_bd's in the page chain. */
4378 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_PG_BDIDX, sc->pg_prod);
4379 
4380 	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
4381 }
4382 
4383 
4384 /****************************************************************************/
4385 /* Free memory and clear the RX data structures.                            */
4386 /*                                                                          */
4387 /* Returns:                                                                 */
4388 /*   Nothing.                                                               */
4389 /****************************************************************************/
4390 static void
4391 bce_free_pg_chain(struct bce_softc *sc)
4392 {
4393 	int i;
4394 
4395 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4396 
4397 	/* Free any mbufs still in the mbuf page chain. */
4398 	for (i = 0; i < TOTAL_PG_BD; i++) {
4399 		if (sc->pg_mbuf_ptr[i] != NULL) {
4400 			if (sc->pg_mbuf_map[i] != NULL)
4401 				bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
4402 					BUS_DMASYNC_POSTREAD);
4403 			m_freem(sc->pg_mbuf_ptr[i]);
4404 			sc->pg_mbuf_ptr[i] = NULL;
4405 			DBRUN(sc->debug_pg_mbuf_alloc--);
4406 		}
4407 	}
4408 
4409 	/* Clear each page chain page. */
4410 	for (i = 0; i < PG_PAGES; i++)
4411 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
4412 
4413 	sc->free_pg_bd = sc->max_pg_bd;
4414 
4415 	/* Check if we lost any mbufs in the process. */
4416 	DBRUNIF((sc->debug_pg_mbuf_alloc),
4417 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
4418 			__FUNCTION__, sc->debug_pg_mbuf_alloc));
4419 
4420 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4421 }
4422 
4423 
4424 /****************************************************************************/
4425 /* Set media options.                                                       */
4426 /*                                                                          */
4427 /* Returns:                                                                 */
4428 /*   0 for success, positive value for failure.                             */
4429 /****************************************************************************/
4430 static int
4431 bce_ifmedia_upd(struct ifnet *ifp)
4432 {
4433 	struct bce_softc *sc;
4434 
4435 	sc = ifp->if_softc;
4436 	BCE_LOCK(sc);
4437 	bce_ifmedia_upd_locked(ifp);
4438 	BCE_UNLOCK(sc);
4439 	return (0);
4440 }
4441 
4442 
4443 /****************************************************************************/
4444 /* Set media options.                                                       */
4445 /*                                                                          */
4446 /* Returns:                                                                 */
4447 /*   Nothing.                                                               */
4448 /****************************************************************************/
4449 static void
4450 bce_ifmedia_upd_locked(struct ifnet *ifp)
4451 {
4452 	struct bce_softc *sc;
4453 	struct mii_data *mii;
4454 	struct ifmedia *ifm;
4455 
4456 	sc = ifp->if_softc;
4457 	ifm = &sc->bce_ifmedia;
4458 	BCE_LOCK_ASSERT(sc);
4459 
4460 	mii = device_get_softc(sc->bce_miibus);
4461 
4462 	/* Make sure the MII bus has been enumerated. */
4463 	if (mii) {
4464 		sc->bce_link = 0;
4465 		if (mii->mii_instance) {
4466 			struct mii_softc *miisc;
4467 
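			/* Reset each PHY before selecting the new media. */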
4468 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4469 				mii_phy_reset(miisc);
4470 		}
4471 		mii_mediachg(mii);
4472 	}
4473 }
4474 
4475 
4476 /****************************************************************************/
4477 /* Reports current media status.                                            */
4478 /*                                                                          */
4479 /* Returns:                                                                 */
4480 /*   Nothing.                                                               */
4481 /****************************************************************************/
4482 static void
4483 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4484 {
4485 	struct bce_softc *sc;
4486 	struct mii_data *mii;
4487 
4488 	sc = ifp->if_softc;
4489 
4490 	BCE_LOCK(sc);
4491 
4492 	mii = device_get_softc(sc->bce_miibus);
4493 
4494 	mii_pollstat(mii);
4495 	ifmr->ifm_active = mii->mii_media_active;
4496 	ifmr->ifm_status = mii->mii_media_status;
4497 
4498 	BCE_UNLOCK(sc);
4499 }
4500 
4501 
4502 /****************************************************************************/
4503 /* Handles PHY generated interrupt events.                                  */
4504 /*                                                                          */
4505 /* Returns:                                                                 */
4506 /*   Nothing.                                                               */
4507 /****************************************************************************/
4508 static void
4509 bce_phy_intr(struct bce_softc *sc)
4510 {
4511 	u32 new_link_state, old_link_state;
4512 
4513 	new_link_state = sc->status_block->status_attn_bits &
4514 		STATUS_ATTN_BITS_LINK_STATE;
4515 	old_link_state = sc->status_block->status_attn_bits_ack &
4516 		STATUS_ATTN_BITS_LINK_STATE;
4517 
4518 	/* Handle any changes if the link state has changed. */
4519 	if (new_link_state != old_link_state) {
4520 
4521 		DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
4522 
4523 		sc->bce_link = 0;
4524 		callout_stop(&sc->bce_tick_callout);
4525 		bce_tick(sc);
4526 
4527 		/* Update the status_attn_bits_ack field in the status block. */
4528 		if (new_link_state) {
4529 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4530 				STATUS_ATTN_BITS_LINK_STATE);
4531 			DBPRINT(sc, BCE_INFO_MISC, "Link is now UP.\n");
4532 		}
4533 		else {
4534 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4535 				STATUS_ATTN_BITS_LINK_STATE);
4536 			DBPRINT(sc, BCE_INFO_MISC, "Link is now DOWN.\n");
4537 		}
4538 
4539 	}
4540 
4541 	/* Acknowledge the link change interrupt. */
4542 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4543 }
4544 
4545 
4546 /****************************************************************************/
4547 /* Reads the receive consumer value from the status block (skipping over    */
4548 /* chain page pointer if necessary).                                        */
4549 /*                                                                          */
4550 /* Returns:                                                                 */
4551 /*   hw_cons                                                                */
4552 /****************************************************************************/
4553 static inline u16
4554 bce_get_hw_rx_cons(struct bce_softc *sc)
4555 {
4556 	u16 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
4557 
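	/* The last rx_bd on each chain page is a page pointer, not a buffer. */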
4558 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4559 		hw_cons++;
4560 
4561 	return hw_cons;
4562 }
4563 
4564 /****************************************************************************/
4565 /* Handles received frame interrupt events.                                 */
4566 /*                                                                          */
4567 /* Returns:                                                                 */
4568 /*   Nothing.                                                               */
4569 /****************************************************************************/
4570 static void
4571 bce_rx_intr(struct bce_softc *sc)
4572 {
4573 	struct ifnet *ifp = sc->bce_ifp;
4574 	struct l2_fhdr *l2fhdr;
4575 	unsigned int pages, pkt_len, rem_len;
4576 	u16 sw_rx_cons, sw_rx_cons_idx, sw_pg_cons, sw_pg_cons_idx, hw_rx_cons;
4577 	u32 status;
4578 
4579 #ifdef BCE_DEBUG
4580 	u32 rx_intr_start, rx_intr_end;
4581 	rx_intr_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4582 	sc->rx_interrupts++;
4583 #endif
4584 
4585 	/* Prepare the RX chain pages to be accessed by the host CPU. */
4586 	for (int i = 0; i < RX_PAGES; i++)
4587 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4588 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4589 
4590 	/* Prepare the page chain pages to be accessed by the host CPU. */
4591 	for (int i = 0; i < PG_PAGES; i++)
4592 		bus_dmamap_sync(sc->pg_bd_chain_tag,
4593 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4594 
4595 	/* Get the hardware's view of the RX consumer index. */
4596 	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4597 
4598 	/* Get working copies of the driver's view of the consumer indices. */
4599 	sw_rx_cons = sc->rx_cons;
4600 	sw_pg_cons = sc->pg_cons;
4601 
4602 	/* Update some debug statistics counters */
4603 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4604 		sc->rx_low_watermark = sc->free_rx_bd);
4605 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4606 
4607 	/* Scan through the receive chain as long as there is work to do */
4608 	/* ToDo: Consider setting a limit on the number of packets processed. */
4609 	while (sw_rx_cons != hw_rx_cons) {
4610 		struct mbuf *m0;
4611 
4612 		/* Convert the producer/consumer indices to an actual rx_bd index. */
4613 		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
4614 
4615 #ifdef DEVICE_POLLING
4616 		if (ifp->if_capenable & IFCAP_POLLING) {
4617 			if (sc->bce_rxcycles <= 0)
4618 				break;
4619 			sc->bce_rxcycles--;
4620 		}
4621 #endif
4622 
4623 		/* Unmap the mbuf from DMA space. */
4624 		bus_dmamap_sync(sc->rx_mbuf_tag,
4625 		    sc->rx_mbuf_map[sw_rx_cons_idx],
4626 		    BUS_DMASYNC_POSTREAD);
4627 		bus_dmamap_unload(sc->rx_mbuf_tag,
4628 		    sc->rx_mbuf_map[sw_rx_cons_idx]);
4629 
4630 		/* Remove the mbuf from the RX chain. */
4631 		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
4632 		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
4633 		DBRUN(sc->debug_rx_mbuf_alloc--);
4634 		sc->free_rx_bd++;
4635 
4636 		/*
4637 		 * Frames received on the NetXtreme II are prepended
4638 		 * with an l2_fhdr structure which provides status
4639 		 * information about the received frame (including
4640 		 * VLAN tags and checksum info).  The frames are also
4641 		 * automatically adjusted to align the IP header
4642 		 * (i.e. two null bytes are inserted before the
4643 		 * Ethernet header).  As a result the data DMA'd by
4644 		 * the controller into the mbuf is as follows:
4645 		 * +---------+-----+---------------------+-----+
4646 		 * | l2_fhdr | pad | packet data         | FCS |
4647 		 * +---------+-----+---------------------+-----+
4648 		 * The l2_fhdr needs to be checked and skipped and
4649 		 * the FCS needs to be stripped before sending the
4650 		 * packet up the stack.
4651 		 */
4652 		l2fhdr  = mtod(m0, struct l2_fhdr *);
4653 
4654 		/* Get the packet data + FCS length and the status. */
4655 		pkt_len = l2fhdr->l2_fhdr_pkt_len;
4656 		status  = l2fhdr->l2_fhdr_status;
4657 
4658 		/*
4659 		 * Skip over the l2_fhdr and pad, resulting in the
4660 		 * following data in the mbuf:
4661 		 * +---------------------+-----+
4662 		 * | packet data         | FCS |
4663 		 * +---------------------+-----+
4664 		 */
4665 		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4666 
4667 		/*
4668 		 * Check whether the received frame fits in a single
4669 		 * mbuf or not (i.e. packet data + FCS <=
4670 		 * sc->rx_bd_mbuf_alloc_size bytes).
4671 		 */
4672 		if (pkt_len > m0->m_len) {
4673 			/*
4674 			 * The received frame is larger than a single mbuf.
4675 			 * If the frame was a TCP frame then only the TCP
4676 			 * header is placed in the mbuf, the remaining
4677 			 * payload (including FCS) is placed in the page
4678 			 * chain, the SPLIT flag is set, and the header
4679 			 * length is placed in the IP checksum field.
4680 			 * If the frame is not a TCP frame then the mbuf
4681 			 * is filled and the remaining bytes are placed
4682 			 * in the page chain.
4683 			 */
4684 		 	if (status & L2_FHDR_STATUS_SPLIT)
4685 				m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
4686 
4687 			rem_len = pkt_len - m0->m_len;
4688 
4689 			/* Calculate how many pages to pull off the page chain. */
4690 			/* ToDo: The following assumes that mbuf clusters are 2KB. */
4691 			pages = (rem_len + sc->pg_bd_mbuf_alloc_size) >> 11;
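			/*
			 * Illustrative arithmetic (assuming 2KB clusters): with
			 * rem_len = 5000, pages = (5000 + 2048) >> 11 = 3 mbufs
			 * are expected from the page chain.
			 */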
4692 
4693 			/* Pull mbufs off the page chain for the remaining data. */
4694 			while (rem_len > 0) {
4695 				struct mbuf *m_pg;
4696 
4697 				sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
4698 
4699 				/* Remove the mbuf from the page chain. */
4700 				m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
4701 				sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
4702 				DBRUN(sc->debug_pg_mbuf_alloc--);
4703 				sc->free_pg_bd++;
4704 
4705 				/* Unmap the page chain mbuf from DMA space. */
4706 				bus_dmamap_sync(sc->pg_mbuf_tag,
4707 					sc->pg_mbuf_map[sw_pg_cons_idx],
4708 					BUS_DMASYNC_POSTREAD);
4709 				bus_dmamap_unload(sc->pg_mbuf_tag,
4710 					sc->pg_mbuf_map[sw_pg_cons_idx]);
4711 
4712 				/* Adjust the mbuf length. */
4713 				if (rem_len < m_pg->m_len) {
4714 					/* The mbuf chain is complete. */
4715 					m_pg->m_len = rem_len;
4716 					rem_len = 0;
4717 				} else {
4718 					/* More packet data is waiting. */
4719 					rem_len -= m_pg->m_len;
4720 				}
4721 
4722 				/* Concatenate the mbuf cluster to the mbuf. */
4723 				m_cat(m0, m_pg);
4724 
4725 				sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
4726 			}
4727 
4728 			/* Set the total packet length. */
4729 			m0->m_pkthdr.len = pkt_len;
4730 
4731 		} else {
4732 			/*
4733 			 * The received packet (data + FCS) fits within the
4734 			 * single RX mbuf (i.e. packet data + FCS <=
4735 			 * sc->rx_bd_mbuf_alloc_size), so no data was placed
4736 			 * in the page chain.
4737 			 */
4738 
4739 			/* Set the total packet length. */
4740 			m0->m_pkthdr.len = m0->m_len = pkt_len;
4741 		}
4742 
4743 		/* Remove the trailing Ethernet FCS. */
4744 		m_adj(m0, -ETHER_CRC_LEN);
4745 
4746 		/* Check that the resulting mbuf chain is valid. */
4747 		DBRUN(m_sanity(m0, FALSE));
4748 
4749 		DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4750 			BCE_PRINTF("Simulating l2_fhdr status error.\n");
4751 			status = status | L2_FHDR_ERRORS_PHY_DECODE);
4752 
4753 		/* Check the received frame for errors. */
4754 		if (status & (L2_FHDR_ERRORS_BAD_CRC |
4755 			L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
4756 			L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
4757 
4758 			/* Log the error and release the mbuf. */
4759 			ifp->if_ierrors++;
4760 			DBRUN(sc->l2fhdr_status_errors++);
4761 
4762 			m_freem(m0);
4763 			m0 = NULL;
4764 			goto bce_rx_int_next_rx;
4765 		}
4766 
4767 		/* Send the packet to the appropriate interface. */
4768 		m0->m_pkthdr.rcvif = ifp;
4769 
4770 		/* Assume no hardware checksum. */
4771 		m0->m_pkthdr.csum_flags = 0;
4772 
4773 		/* Validate the checksum if offload enabled. */
4774 		if (ifp->if_capenable & IFCAP_RXCSUM) {
4775 
4776 			/* Check for an IP datagram. */
4777 		 	if (!(status & L2_FHDR_STATUS_SPLIT) &&
4778 				(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
4779 				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4780 
4781 				/* Check if the IP checksum is valid. */
4782 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4783 					m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4784 			}
4785 
4786 			/* Check for a valid TCP/UDP frame. */
4787 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4788 				L2_FHDR_STATUS_UDP_DATAGRAM)) {
4789 
4790 				/* Check for a good TCP/UDP checksum. */
4791 				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4792 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
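					/*
					 * The controller has already validated the
					 * TCP/UDP checksum; hand its result to the
					 * stack and flag the data as valid so no
					 * software checksum is recomputed.
					 */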
4793 					m0->m_pkthdr.csum_data =
4794 					    l2fhdr->l2_fhdr_tcp_udp_xsum;
4795 					m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
4796 						| CSUM_PSEUDO_HDR);
4797 				}
4798 			}
4799 		}
4800 
4801 		/*
4802 		 * If we received a packet with a vlan tag,
4803 		 * attach that information to the packet.
4804 		 */
4805 		if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4806 #if __FreeBSD_version < 700000
4807 			VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
4808 #else
4809 			m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
4810 			m0->m_flags |= M_VLANTAG;
4811 #endif
4812 		}
4813 
4814 		/* Pass the mbuf off to the upper layers. */
4815 		ifp->if_ipackets++;
4816 
4817 bce_rx_int_next_rx:
4818 		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
4819 
4820 		/* If we have a packet, pass it up the stack */
4821 		if (m0) {
4822 			/* Make sure we don't lose our place when we release the lock. */
4823 			sc->rx_cons = sw_rx_cons;
4824 			sc->pg_cons = sw_pg_cons;
4825 
4826 			BCE_UNLOCK(sc);
4827 			(*ifp->if_input)(ifp, m0);
4828 			BCE_LOCK(sc);
4829 
4830 			/* Recover our place. */
4831 			sw_rx_cons = sc->rx_cons;
4832 			sw_pg_cons = sc->pg_cons;
4833 		}
4834 
4835 		/* Refresh hw_cons to see if there's new work */
4836 		if (sw_rx_cons == hw_rx_cons)
4837 			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4838 	}
4839 
4840 	/* No new packets to process.  Refill the RX and page chains and exit. */
4841 	sc->pg_cons = sw_pg_cons;
4842 	bce_fill_pg_chain(sc);
4843 
4844 	sc->rx_cons = sw_rx_cons;
4845 	bce_fill_rx_chain(sc);
4846 
4847 	for (int i = 0; i < RX_PAGES; i++)
4848 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4849 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4850 
4851 	for (int i = 0; i < PG_PAGES; i++)
4852 		bus_dmamap_sync(sc->pg_bd_chain_tag,
4853 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4854 
4855 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4856 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4857 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4858 
4859 	DBRUN(rx_intr_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4860 		sc->rx_intr_time += (u64) BCE_TIME_DELTA(rx_intr_start, rx_intr_end));
4861 }
4862 
4863 
4864 /****************************************************************************/
4865 /* Handles transmit completion interrupt events.                            */
4866 /*                                                                          */
4867 /* Returns:                                                                 */
4868 /*   Nothing.                                                               */
4869 /****************************************************************************/
4870 static void
4871 bce_tx_intr(struct bce_softc *sc)
4872 {
4873 	struct status_block *sblk = sc->status_block;
4874 	struct ifnet *ifp = sc->bce_ifp;
4875 	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4876 
4877 #ifdef BCE_DEBUG
4878 	u32 tx_intr_start, tx_intr_end;
4879 	tx_intr_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4880 	sc->tx_interrupts++;
4881 #endif
4882 
4883 	BCE_LOCK_ASSERT(sc);
4884 
4885 	/* Get the hardware's view of the TX consumer index. */
4886 	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4887 
4888 	/* Skip to the next entry if this is a chain page pointer. */
4889 	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4890 		hw_tx_cons++;
4891 
4892 	sw_tx_cons = sc->tx_cons;
4893 
4894 	/* Prevent speculative reads from getting ahead of the status block. */
4895 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4896 		BUS_SPACE_BARRIER_READ);
4897 
4898 	/* Cycle through any completed TX chain page entries. */
4899 	while (sw_tx_cons != hw_tx_cons) {
4900 #ifdef BCE_DEBUG
4901 		struct tx_bd *txbd = NULL;
4902 #endif
4903 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4904 
4905 		DBPRINT(sc, BCE_INFO_SEND,
4906 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4907 			"sw_tx_chain_cons = 0x%04X\n",
4908 			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4909 
4910 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4911 			BCE_PRINTF("%s(%d): TX chain consumer out of range! "
4912 				" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
4913 				(int) MAX_TX_BD);
4914 			bce_breakpoint(sc));
4915 
4916 		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4917 				[TX_IDX(sw_tx_chain_cons)]);
4918 
4919 		DBRUNIF((txbd == NULL),
4920 			BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
4921 				__FILE__, __LINE__, sw_tx_chain_cons);
4922 			bce_breakpoint(sc));
4923 
4924 		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
4925 			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4926 
4927 		/*
4928 		 * Free the associated mbuf. Remember
4929 		 * that only the last tx_bd of a packet
4930 		 * has an mbuf pointer and DMA map.
4931 		 */
4932 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4933 
4934 			/* Validate that this is the last tx_bd. */
4935 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4936 				BCE_PRINTF("%s(%d): tx_bd END flag not set but "
4937 				"txmbuf == NULL!\n", __FILE__, __LINE__);
4938 				bce_breakpoint(sc));
4939 
4940 			DBRUNMSG(BCE_INFO_SEND,
4941 				BCE_PRINTF("%s(): Unloading map/freeing mbuf "
4942 					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4943 
4944 			/* Unmap the mbuf. */
4945 			bus_dmamap_unload(sc->tx_mbuf_tag,
4946 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
4947 
4948 			/* Free the mbuf. */
4949 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4950 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4951 			DBRUN(sc->debug_tx_mbuf_alloc--);
4952 
4953 			ifp->if_opackets++;
4954 		}
4955 
4956 		sc->used_tx_bd--;
4957 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4958 
4959 		/* Refresh hw_cons to see if there's new work. */
4960 		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4961 		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4962 			hw_tx_cons++;
4963 
4964 		/* Prevent speculative reads from getting ahead of the status block. */
4965 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4966 			BUS_SPACE_BARRIER_READ);
4967 	}
4968 
4969 	/* Clear the TX timeout timer. */
4970 	sc->watchdog_timer = 0;
4971 
4972 	/* Clear the tx hardware queue full flag. */
4973 	if (sc->used_tx_bd < sc->max_tx_bd) {
4974 		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4975 			DBPRINT(sc, BCE_INFO_SEND,
4976 				"%s(): Open TX chain! %d/%d (used/total)\n",
4977 				__FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
4978 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4979 	}
4980 
4981 	sc->tx_cons = sw_tx_cons;
4982 	DBRUN(tx_intr_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4983 		sc->tx_intr_time += (u64) BCE_TIME_DELTA(tx_intr_start, tx_intr_end));
4984 }
4985 
4986 
4987 /****************************************************************************/
4988 /* Disables interrupt generation.                                           */
4989 /*                                                                          */
4990 /* Returns:                                                                 */
4991 /*   Nothing.                                                               */
4992 /****************************************************************************/
4993 static void
4994 bce_disable_intr(struct bce_softc *sc)
4995 {
4996 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4997 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
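	/* Read back to flush the posted write so the mask takes effect. */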
4998 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4999 }
5000 
5001 
5002 /****************************************************************************/
5003 /* Enables interrupt generation.                                            */
5004 /*                                                                          */
5005 /* Returns:                                                                 */
5006 /*   Nothing.                                                               */
5007 /****************************************************************************/
5008 static void
5009 bce_enable_intr(struct bce_softc *sc)
5010 {
5011 	u32 val;
5012 
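	/*
	 * Re-enable in two steps: the first write records the last status
	 * index seen while leaving interrupts masked, the second write
	 * unmasks them.  The COAL_NOW command below then requests an
	 * immediate host coalescing pass so any events posted while
	 * interrupts were disabled generate an interrupt right away.
	 */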
5013 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5014 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5015 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
5016 
5017 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5018 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5019 
5020 	val = REG_RD(sc, BCE_HC_COMMAND);
5021 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
5022 }
5023 
5024 
5025 /****************************************************************************/
5026 /* Handles controller initialization.                                       */
5027 /*                                                                          */
5028 /* Returns:                                                                 */
5029 /*   Nothing.                                                               */
5030 /****************************************************************************/
5031 static void
5032 bce_init_locked(struct bce_softc *sc)
5033 {
5034 	struct ifnet *ifp;
5035 	u32 ether_mtu = 0;
5036 
5037 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
5038 
5039 	BCE_LOCK_ASSERT(sc);
5040 
5041 	ifp = sc->bce_ifp;
5042 
5043 	/* Check if the driver is still running and bail out if it is. */
5044 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5045 		goto bce_init_locked_exit;
5046 
5047 	bce_stop(sc);
5048 
5049 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
5050 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
5051 			__FILE__, __LINE__);
5052 		goto bce_init_locked_exit;
5053 	}
5054 
5055 	if (bce_chipinit(sc)) {
5056 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
5057 			__FILE__, __LINE__);
5058 		goto bce_init_locked_exit;
5059 	}
5060 
5061 	if (bce_blockinit(sc)) {
5062 		BCE_PRINTF("%s(%d): Block initialization failed!\n",
5063 			__FILE__, __LINE__);
5064 		goto bce_init_locked_exit;
5065 	}
5066 
5067 	/* Load our MAC address. */
5068 	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
5069 	bce_set_mac_addr(sc);
5070 
5071 	/* Calculate and program the hardware Ethernet MTU size. */
5072 	if (ifp->if_mtu <= sc->pg_bd_mbuf_alloc_size)
5073 		/* Be generous on receive if we have room. */
5074 		ether_mtu = sc->pg_bd_mbuf_alloc_size;
5075 	else
5076 		ether_mtu = ifp->if_mtu;
5077 
5078 	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
5079 
5080 	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
5081 		ether_mtu);
5082 
5083 	/* Program the mtu, enabling jumbo frame support if necessary. */
5084 	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
5085 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
5086 			min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
5087 			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
5088 	else
5089 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
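	/*
	 * Illustrative arithmetic: with an if_mtu of 9000 bytes (larger than
	 * the page chain mbuf allocation size) the programmed value becomes
	 * 9000 + 14 + 4 + 4 = 9022 bytes, which exceeds ETHER_MAX_LEN +
	 * ETHER_VLAN_ENCAP_LEN (1522) and so enables jumbo frame receive.
	 */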
5090 
5091 	DBPRINT(sc, BCE_INFO_LOAD,
5092 		"%s(): rx_bd_mbuf_alloc_size = %d, pg_bd_mbuf_alloc_size = %d\n",
5093 		__FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->pg_bd_mbuf_alloc_size);
5094 
5095 	/* Program appropriate promiscuous/multicast filtering. */
5096 	bce_set_rx_mode(sc);
5097 
5098 	/* Init page buffer descriptor chain. */
5099 	bce_init_pg_chain(sc);
5100 
5101 	/* Init RX buffer descriptor chain. */
5102 	bce_init_rx_chain(sc);
5103 
5104 	/* Init TX buffer descriptor chain. */
5105 	bce_init_tx_chain(sc);
5106 
5107 #ifdef DEVICE_POLLING
5108 	/* Disable interrupts if we are polling. */
5109 	if (ifp->if_capenable & IFCAP_POLLING) {
5110 		bce_disable_intr(sc);
5111 
5112 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5113 			(1 << 16) | sc->bce_rx_quick_cons_trip);
5114 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5115 			(1 << 16) | sc->bce_tx_quick_cons_trip);
5116 	} else
5117 #endif
5118 	/* Enable host interrupts. */
5119 	bce_enable_intr(sc);
5120 
5121 	bce_ifmedia_upd_locked(ifp);
5122 
5123 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
5124 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5125 
5126 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
5127 
5128 bce_init_locked_exit:
5129 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5130 
5131 	return;
5132 }
5133 
5134 
5135 /****************************************************************************/
5136 /* Initialize the controller just enough so that any management firmware    */
5137 /* running on the device will continue to operate correctly.                */
5138 /*                                                                          */
5139 /* Returns:                                                                 */
5140 /*   Nothing.                                                               */
5141 /****************************************************************************/
5142 static void
5143 bce_mgmt_init_locked(struct bce_softc *sc)
5144 {
5145 	struct ifnet *ifp;
5146 
5147 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
5148 
5149 	BCE_LOCK_ASSERT(sc);
5150 
5151 	/* Bail out if management firmware is not running. */
5152 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
5153 		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
5154 			"No management firmware running...\n");
5155 		goto bce_mgmt_init_locked_exit;
5156 	}
5157 
5158 	ifp = sc->bce_ifp;
5159 
5160 	/* Enable all critical blocks in the MAC. */
5161 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
5162 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
5163 	DELAY(20);
5164 
5165 	bce_ifmedia_upd_locked(ifp);
5166 bce_mgmt_init_locked_exit:
5167 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5168 
5169 	return;
5170 }
5171 
5172 
5173 /****************************************************************************/
5174 /* Handles controller initialization when called from an unlocked routine.  */
5175 /*                                                                          */
5176 /* Returns:                                                                 */
5177 /*   Nothing.                                                               */
5178 /****************************************************************************/
5179 static void
5180 bce_init(void *xsc)
5181 {
5182 	struct bce_softc *sc = xsc;
5183 
5184 	BCE_LOCK(sc);
5185 	bce_init_locked(sc);
5186 	BCE_UNLOCK(sc);
5187 }
5188 
5189 
5190 /****************************************************************************/
5191 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
5192 /* memory visible to the controller.                                        */
5193 /*                                                                          */
5194 /* Returns:                                                                 */
5195 /*   0 for success, positive value for failure.                             */
5196 /* Modified:                                                                */
5197 /*   m_head: May be set to NULL if MBUF is excessively fragmented.          */
5198 /****************************************************************************/
5199 static int
5200 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
5201 {
5202 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
5203 	bus_dmamap_t map;
5204 	struct tx_bd *txbd = NULL;
5205 	struct mbuf *m0;
5206 	struct ether_vlan_header *eh;
5207 	struct ip *ip;
5208 	struct tcphdr *th;
5209 	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
5210 	u32 prod_bseq;
5211 	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
5212 
5213 
5214 #ifdef BCE_DEBUG
5215 	u16 debug_prod;
5216 #endif
5217 	int i, error, nsegs, rc = 0;
5218 
5219 	/* Transfer any checksum offload flags to the bd. */
5220 	m0 = *m_head;
5221 	if (m0->m_pkthdr.csum_flags) {
5222 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
5223 			flags |= TX_BD_FLAGS_IP_CKSUM;
5224 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
5225 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5226 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5227 			/* For TSO the controller needs two pieces of info, */
5228 			/* the MSS and the IP+TCP options length.           */
5229 			mss = htole16(m0->m_pkthdr.tso_segsz);
5230 
5231 			/* Map the header and find the Ethernet type & header length */
5232 			eh = mtod(m0, struct ether_vlan_header *);
5233 			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5234 				etype = ntohs(eh->evl_proto);
5235 				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5236 			} else {
5237 				etype = ntohs(eh->evl_encap_proto);
5238 				e_hlen = ETHER_HDR_LEN;
5239 			}
5240 
5241 			/* Check for supported TSO Ethernet types (only IPv4 for now) */
5242 			switch (etype) {
5243 				case ETHERTYPE_IP:
5244 					ip = (struct ip *)(m0->m_data + e_hlen);
5245 
5246 					/* TSO only supported for TCP protocol */
5247 					if (ip->ip_p != IPPROTO_TCP) {
5248 						BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n",
5249 							__FILE__, __LINE__);
5250 						goto bce_tx_encap_skip_tso;
5251 					}
5252 
5253 					/* Get IP header length in bytes (min 20) */
5254 					ip_hlen = ip->ip_hl << 2;
5255 
5256 					/* Get the TCP header length in bytes (min 20) */
5257 					th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
5258 					tcp_hlen = (th->th_off << 2);
5259 
5260 					/* IP header length and checksum will be calc'd by hardware */
5261 					ip_len = ip->ip_len;
5262 					ip->ip_len = 0;
5263 					ip->ip_sum = 0;
5264 					break;
5265 				case ETHERTYPE_IPV6:
5266 					BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n",
5267 						__FILE__, __LINE__);
5268 					goto bce_tx_encap_skip_tso;
5269 				default:
5270 					BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n",
5271 						__FILE__, __LINE__);
5272 					goto bce_tx_encap_skip_tso;
5273 			}
5274 
5275 			hdr_len = e_hlen + ip_hlen + tcp_hlen;
5276 
5277 			DBPRINT(sc, BCE_EXCESSIVE_SEND,
5278 				"%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
5279 				 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
5280 
5281 			/* Set the LSO flag in the TX BD */
5282 			flags |= TX_BD_FLAGS_SW_LSO;
5283 			/* Set the length of IP + TCP options (in 32 bit words) */
5284 			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
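			/*
			 * Illustrative arithmetic: a 20 byte IP header plus a
			 * 32 byte TCP header (12 bytes of options) gives
			 * (20 + 32 - 40) >> 2 = 3 option words, where 40 is the
			 * minimum combined IPv4 + TCP header length.
			 */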
5285 
5286 bce_tx_encap_skip_tso:
5287 			DBRUN(sc->requested_tso_frames++);
5288 		}
5289 	}
5290 
5291 	/* Transfer any VLAN tags to the bd. */
5292 	if (m0->m_flags & M_VLANTAG) {
5293 		flags |= TX_BD_FLAGS_VLAN_TAG;
5294 		vlan_tag = m0->m_pkthdr.ether_vtag;
5295 	}
5296 
5297 	/* Map the mbuf into DMAable memory. */
5298 	prod = sc->tx_prod;
5299 	chain_prod = TX_CHAIN_IDX(prod);
5300 	map = sc->tx_mbuf_map[chain_prod];
5301 
5302 	/* Map the mbuf into our DMA address space. */
5303 	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
5304 	    segs, &nsegs, BUS_DMA_NOWAIT);
5305 
5306 	/* Check if the DMA mapping was successful */
5307 	if (error == EFBIG) {
5308 
5309 		/* The mbuf is too fragmented for our DMA mapping. */
5310 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
5311 			__FUNCTION__, nsegs);
5312 		DBRUN(bce_dump_mbuf(sc, m0););
5313 
5314 		/* Try to defrag the mbuf. */
5315 		m0 = m_defrag(*m_head, M_DONTWAIT);
5316 		if (m0 == NULL) {
5317 			/* Defrag was unsuccessful */
5318 			m_freem(*m_head);
5319 			*m_head = NULL;
5320 			sc->mbuf_alloc_failed++;
5321 			return (ENOBUFS);
5322 		}
5323 
5324 		/* Defrag was successful, try mapping again */
5325 		*m_head = m0;
5326 		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
5327 		    segs, &nsegs, BUS_DMA_NOWAIT);
5328 
5329 		/* Still getting an error after a defrag. */
5330 		if (error == ENOMEM) {
5331 			/* Insufficient DMA buffers available. */
5332 			sc->tx_dma_map_failures++;
5333 			return (error);
5334 		} else if (error != 0) {
5335 			/* Still can't map the mbuf, release it and return an error. */
5336 			BCE_PRINTF(
5337 			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
5338 			    __FILE__, __LINE__);
5339 			m_freem(m0);
5340 			*m_head = NULL;
5341 			sc->tx_dma_map_failures++;
5342 			return (ENOBUFS);
5343 		}
5344 	} else if (error == ENOMEM) {
5345 		/* Insufficient DMA buffers available. */
5346 		sc->tx_dma_map_failures++;
5347 		return (error);
5348 	} else if (error != 0) {
5349 		m_freem(m0);
5350 		*m_head = NULL;
5351 		sc->tx_dma_map_failures++;
5352 		return (error);
5353 	}
5354 
5355 	/* Make sure there's room in the chain */
5356 	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
5357 		bus_dmamap_unload(sc->tx_mbuf_tag, map);
5358 		return (ENOBUFS);
5359 	}
5360 
5361 	/* prod points to an empty tx_bd at this point. */
5362 	prod_bseq  = sc->tx_prod_bseq;
5363 
5364 #ifdef BCE_DEBUG
5365 	debug_prod = chain_prod;
5366 #endif
5367 
5368 	DBPRINT(sc, BCE_INFO_SEND,
5369 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
5370 		"prod_bseq = 0x%08X\n",
5371 		__FUNCTION__, prod, chain_prod, prod_bseq);
5372 
5373 	/*
5374 	 * Cycle through each mbuf segment that makes up
5375 	 * the outgoing frame, gathering the mapping info
5376 	 * for that segment and creating a tx_bd for
5377 	 * the mbuf.
5378 	 */
5379 	for (i = 0; i < nsegs ; i++) {
5380 
5381 		chain_prod = TX_CHAIN_IDX(prod);
5382 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
5383 
5384 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
5385 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
5386 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
5387 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
5388 		txbd->tx_bd_flags = htole16(flags);
5389 		prod_bseq += segs[i].ds_len;
5390 		if (i == 0)
5391 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
5392 		prod = NEXT_TX_BD(prod);
5393 	}
5394 
5395 	/* Set the END flag on the last TX buffer descriptor. */
5396 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
5397 
5398 	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
5399 
5400 	DBPRINT(sc, BCE_INFO_SEND,
5401 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
5402 		"prod_bseq = 0x%08X\n",
5403 		__FUNCTION__, prod, chain_prod, prod_bseq);
5404 
5405 	/*
5406 	 * Ensure that the mbuf pointer for this transmission
5407 	 * is placed at the array index of the last
5408 	 * descriptor in this chain.  This is done
5409 	 * because a single map is used for all
5410 	 * segments of the mbuf and we don't want to
5411 	 * unload the map before all of the segments
5412 	 * have been freed.
5413 	 */
5414 	sc->tx_mbuf_ptr[chain_prod] = m0;
5415 	sc->used_tx_bd += nsegs;
5416 
5417 	/* Update some debug statistic counters */
5418 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
5419 		sc->tx_hi_watermark = sc->used_tx_bd);
5420 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
5421 	DBRUN(sc->debug_tx_mbuf_alloc++);
5422 
5423 	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
5424 
5425 	/* prod points to the next free tx_bd at this point. */
5426 	sc->tx_prod = prod;
5427 	sc->tx_prod_bseq = prod_bseq;
5428 
5429 	return(rc);
5430 }
5431 
5432 
5433 /****************************************************************************/
5434 /* Main transmit routine when called from another routine with a lock.      */
5435 /*                                                                          */
5436 /* Returns:                                                                 */
5437 /*   Nothing.                                                               */
5438 /****************************************************************************/
5439 static void
5440 bce_start_locked(struct ifnet *ifp)
5441 {
5442 	struct bce_softc *sc = ifp->if_softc;
5443 	struct mbuf *m_head = NULL;
5444 	int count = 0;
5445 	u16 tx_prod, tx_chain_prod;
5446 
5447 	/* If there's no link or the transmit queue is empty then just exit. */
5448 	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5449 		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
5450 			__FUNCTION__);
5451 		goto bce_start_locked_exit;
5452 	}
5453 
5454 	/* prod points to the next free tx_bd. */
5455 	tx_prod = sc->tx_prod;
5456 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
5457 
5458 	DBPRINT(sc, BCE_INFO_SEND,
5459 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
5460 		"tx_prod_bseq = 0x%08X\n",
5461 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5462 
5463 	/*
5464 	 * Keep adding entries while there is space in the ring.
5465 	 */
5466 	while (sc->used_tx_bd < sc->max_tx_bd) {
5467 
5468 		/* Check for any frames to send. */
5469 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
5470 		if (m_head == NULL)
5471 			break;
5472 
5473 		/*
5474 		 * Pack the data into the transmit ring. If we
5475 		 * don't have room, place the mbuf back at the
5476 		 * head of the queue and set the OACTIVE flag
5477 		 * to wait for the NIC to drain the chain.
5478 		 */
5479 		if (bce_tx_encap(sc, &m_head)) {
5480 			if (m_head != NULL)
5481 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
5482 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5483 			DBPRINT(sc, BCE_INFO_SEND,
5484 				"TX chain is closed for business! Total tx_bd used = %d\n",
5485 				sc->used_tx_bd);
5486 			break;
5487 		}
5488 
5489 		count++;
5490 
5491 		/* Send a copy of the frame to any BPF listeners. */
5492 		ETHER_BPF_MTAP(ifp, m_head);
5493 	}
5494 
5495 	if (count == 0) {
5496 		/* no packets were dequeued */
5497 		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
5498 			__FUNCTION__);
5499 		goto bce_start_locked_exit;
5500 	}
5501 
5502 	/* Update the driver's counters. */
5503 	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5504 
5505 	DBPRINT(sc, BCE_INFO_SEND,
5506 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
5507 		"tx_prod_bseq = 0x%08X\n",
5508 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5509 
5510 	/* Start the transmit. */
5511 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5512 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5513 
5514 	/* Set the tx timeout. */
5515 	sc->watchdog_timer = BCE_TX_TIMEOUT;
5516 
5517 bce_start_locked_exit:
5518 	return;
5519 }
5520 
5521 
5522 /****************************************************************************/
5523 /* Main transmit routine when called from another routine without a lock.   */
5524 /*                                                                          */
5525 /* Returns:                                                                 */
5526 /*   Nothing.                                                               */
5527 /****************************************************************************/
5528 static void
5529 bce_start(struct ifnet *ifp)
5530 {
5531 	struct bce_softc *sc = ifp->if_softc;
5532 
5533 	BCE_LOCK(sc);
5534 	bce_start_locked(ifp);
5535 	BCE_UNLOCK(sc);
5536 }
5537 
5538 
5539 /****************************************************************************/
5540 /* Handles any IOCTL calls from the operating system.                       */
5541 /*                                                                          */
5542 /* Returns:                                                                 */
5543 /*   0 for success, positive value for failure.                             */
5544 /****************************************************************************/
5545 static int
5546 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5547 {
5548 	struct bce_softc *sc = ifp->if_softc;
5549 	struct ifreq *ifr = (struct ifreq *) data;
5550 	struct mii_data *mii;
5551 	int mask, error = 0;
5552 
5553 	switch(command) {
5554 
5555 		/* Set the interface MTU. */
5556 		case SIOCSIFMTU:
5557 			/* Check that the MTU setting is supported. */
5558 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
5559 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
5560 				error = EINVAL;
5561 				break;
5562 			}
5563 
5564 			DBPRINT(sc, BCE_INFO_MISC,
5565 				"SIOCSIFMTU: Changing MTU from %d to %d\n",
5566 				(int) ifp->if_mtu, (int) ifr->ifr_mtu);
5567 
5568 			BCE_LOCK(sc);
5569 			ifp->if_mtu = ifr->ifr_mtu;
5570 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5571 			bce_init_locked(sc);
5572 			BCE_UNLOCK(sc);
5573 			break;
5574 
5575 		/* Set interface flags. */
5576 		case SIOCSIFFLAGS:
5577 			DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
5578 
5579 			BCE_LOCK(sc);
5580 
5581 			/* Check if the interface is up. */
5582 			if (ifp->if_flags & IFF_UP) {
5583 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5584 					/* Change promiscuous/multicast flags as necessary. */
5585 					bce_set_rx_mode(sc);
5586 				} else {
5587 					/* Start the HW */
5588 					bce_init_locked(sc);
5589 				}
5590 			} else {
5591 				/* The interface is down, check if driver is running. */
5592 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5593 					bce_stop(sc);
5594 
5595 					/* If MFW is running, partially reinitialize the controller. */
5596 					if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5597 						bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5598 						bce_chipinit(sc);
5599 						bce_mgmt_init_locked(sc);
5600 					}
5601 				}
5602 			}
5603 
5604 			BCE_UNLOCK(sc);
5605 			error = 0;
5606 
5607 			break;
5608 
5609 		/* Add/Delete multicast address */
5610 		case SIOCADDMULTI:
5611 		case SIOCDELMULTI:
5612 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
5613 
5614 			BCE_LOCK(sc);
5615 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5616 				bce_set_rx_mode(sc);
5617 				error = 0;
5618 			}
5619 			BCE_UNLOCK(sc);
5620 
5621 			break;
5622 
5623 		/* Set/Get Interface media */
5624 		case SIOCSIFMEDIA:
5625 		case SIOCGIFMEDIA:
5626 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
5627 
5628 			mii = device_get_softc(sc->bce_miibus);
5629 			error = ifmedia_ioctl(ifp, ifr,
5630 			    &mii->mii_media, command);
5631 			break;
5632 
5633 		/* Set interface capability */
5634 		case SIOCSIFCAP:
5635 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5636 			DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
5637 
5638 #ifdef DEVICE_POLLING
5639 			if (mask & IFCAP_POLLING) {
5640 				if (ifr->ifr_reqcap & IFCAP_POLLING) {
5641 
5642 					/* Setup the poll routine to call. */
5643 					error = ether_poll_register(bce_poll, ifp);
5644 					if (error) {
5645 						BCE_PRINTF("%s(%d): Error registering poll function!\n",
5646 							__FILE__, __LINE__);
5647 						goto bce_ioctl_exit;
5648 					}
5649 
5650 					/* Clear the interrupt. */
5651 					BCE_LOCK(sc);
5652 					bce_disable_intr(sc);
5653 
5654 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5655 						(1 << 16) | sc->bce_rx_quick_cons_trip);
5656 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5657 						(1 << 16) | sc->bce_tx_quick_cons_trip);
5658 
5659 					ifp->if_capenable |= IFCAP_POLLING;
5660 					BCE_UNLOCK(sc);
5661 				} else {
5662 					/* Clear the poll routine. */
5663 					error = ether_poll_deregister(ifp);
5664 
5665 					/* Enable interrupt even in error case */
5666 					BCE_LOCK(sc);
5667 					bce_enable_intr(sc);
5668 
5669 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5670 						(sc->bce_tx_quick_cons_trip_int << 16) |
5671 						sc->bce_tx_quick_cons_trip);
5672 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5673 						(sc->bce_rx_quick_cons_trip_int << 16) |
5674 						sc->bce_rx_quick_cons_trip);
5675 
5676 					ifp->if_capenable &= ~IFCAP_POLLING;
5677 					BCE_UNLOCK(sc);
5678 				}
5679 			}
5680 #endif /*DEVICE_POLLING */
5681 
5682 			/* Toggle the TX checksum capabilities enable flag. */
5683 			if (mask & IFCAP_TXCSUM) {
5684 				ifp->if_capenable ^= IFCAP_TXCSUM;
5685 				if (IFCAP_TXCSUM & ifp->if_capenable)
5686 					ifp->if_hwassist = BCE_IF_HWASSIST;
5687 				else
5688 					ifp->if_hwassist = 0;
5689 			}
5690 
5691 			/* Toggle the RX checksum capabilities enable flag. */
5692 			if (mask & IFCAP_RXCSUM) {
5693 				ifp->if_capenable ^= IFCAP_RXCSUM;
5694 				if (IFCAP_RXCSUM & ifp->if_capenable)
5695 					ifp->if_hwassist = BCE_IF_HWASSIST;
5696 				else
5697 					ifp->if_hwassist = 0;
5698 			}
5699 
5700 			/* Toggle the TSO capabilities enable flag. */
5701 			if (bce_tso_enable && (mask & IFCAP_TSO4)) {
5702 				ifp->if_capenable ^= IFCAP_TSO4;
5703 				if (IFCAP_TSO4 & ifp->if_capenable)
5704 					ifp->if_hwassist = BCE_IF_HWASSIST;
5705 				else
5706 					ifp->if_hwassist = 0;
5707 			}
5708 
5709 			/* Toggle VLAN_MTU capabilities enable flag. */
5710 			if (mask & IFCAP_VLAN_MTU) {
5711 				BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
5712 					__FILE__, __LINE__);
5713 			}
5714 
5715 			/* Toggle VLANHWTAG capabilities enabled flag. */
5716 			if (mask & IFCAP_VLAN_HWTAGGING) {
5717 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
5718 					BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
5719 						"management firmware (ASF/IPMI/UMP) is running!\n",
5720 						__FILE__, __LINE__);
5721 				else
5722 					BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
5723 						__FILE__, __LINE__);
5724 			}
5725 
5726 			break;
5727 		default:
5728 			/* We don't know how to handle the IOCTL, pass it on. */
5729 			error = ether_ioctl(ifp, command, data);
5730 			break;
5731 	}
5732 
5733 #ifdef DEVICE_POLLING
5734 bce_ioctl_exit:
5735 #endif
5736 	return(error);
5737 }
5738 
5739 
5740 /****************************************************************************/
5741 /* Transmit timeout handler.                                                */
5742 /*                                                                          */
5743 /* Returns:                                                                 */
5744 /*   Nothing.                                                               */
5745 /****************************************************************************/
5746 static void
5747 bce_watchdog(struct bce_softc *sc)
5748 {
5749 
5750 	BCE_LOCK_ASSERT(sc);
5751 
5752 	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
5753 		return;
5754 
5755 	/*
5756 	 * If we are in this routine because of pause frames, then
5757 	 * don't reset the hardware.
5758 	 */
5759 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5760 		return;
5761 
5762 	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
5763 		__FILE__, __LINE__);
5764 
5765 	DBRUNMSG(BCE_VERBOSE_SEND,
5766 		bce_dump_driver_state(sc);
5767 		bce_dump_status_block(sc));
5768 
5769 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5770 
5771 	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5772 
5773 	bce_init_locked(sc);
5774 	sc->bce_ifp->if_oerrors++;
5775 
5776 }
5777 
5778 
5779 #ifdef DEVICE_POLLING
5780 static void
5781 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5782 {
5783 	struct bce_softc *sc = ifp->if_softc;
5784 
5785 	BCE_LOCK_ASSERT(sc);
5786 
5787 	sc->bce_rxcycles = count;
5788 
5789 	bus_dmamap_sync(sc->status_tag, sc->status_map,
5790 	    BUS_DMASYNC_POSTWRITE);
5791 
5792 	/* Check for any completed RX frames. */
5793 	if (sc->status_block->status_rx_quick_consumer_index0 !=
5794 		sc->hw_rx_cons)
5795 		bce_rx_intr(sc);
5796 
5797 	/* Check for any completed TX frames. */
5798 	if (sc->status_block->status_tx_quick_consumer_index0 !=
5799 		sc->hw_tx_cons)
5800 		bce_tx_intr(sc);
5801 
5802 	/* Check for new frames to transmit. */
5803 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5804 		bce_start_locked(ifp);
5805 
5806 }
5807 
5808 
5809 static void
5810 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5811 {
5812 	struct bce_softc *sc = ifp->if_softc;
5813 
5814 	BCE_LOCK(sc);
5815 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5816 		bce_poll_locked(ifp, cmd, count);
5817 	BCE_UNLOCK(sc);
5818 }
5819 #endif /* DEVICE_POLLING */
5820 
5821 
5822 #if 0
5823 static inline int
5824 bce_has_work(struct bce_softc *sc)
5825 {
5826 	struct status_block *stat = sc->status_block;
5827 
5828 	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5829 	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5830 		return 1;
5831 
5832 	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5833 	    sc->bce_link)
5834 		return 1;
5835 
5836 	return 0;
5837 }
5838 #endif
5839 
5840 
5841 /*
5842  * Interrupt handler.
5843  */
5844 /****************************************************************************/
5845 /* Main interrupt entry point.  Verifies that the controller generated the  */
5846 /* interrupt and then calls separate routines to handle the various         */
5847 /* interrupt causes (PHY, TX, RX).                                          */
5848 /*                                                                          */
5849 /* Returns:                                                                 */
5850 /*   Nothing.                                                               */
5851 /****************************************************************************/
5852 static void
5853 bce_intr(void *xsc)
5854 {
5855 	struct bce_softc *sc;
5856 	struct ifnet *ifp;
5857 	u32 status_attn_bits;
5858 
5859 	sc = xsc;
5860 	ifp = sc->bce_ifp;
5861 
5862 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5863 	BCE_LOCK(sc);
5864 
5865 	DBRUN(sc->interrupts_generated++);
5866 
5867 #ifdef DEVICE_POLLING
5868 	if (ifp->if_capenable & IFCAP_POLLING) {
5869 		DBPRINT(sc, BCE_INFO_MISC, "Polling enabled!\n");
5870 		goto bce_intr_exit;
5871 	}
5872 #endif
5873 
5874 	bus_dmamap_sync(sc->status_tag, sc->status_map,
5875 	    BUS_DMASYNC_POSTWRITE);
5876 
5877 	/*
5878 	 * If the hardware status block index
5879 	 * matches the last value read by the
5880 	 * driver and we haven't asserted our
5881 	 * interrupt then there's nothing to do.
5882 	 */
5883 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
5884 		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5885 		goto bce_intr_exit;
5886 
5887 	/* Ack the interrupt and stop others from occurring. */
5888 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5889 		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5890 		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5891 
5892 	/* Keep processing data as long as there is work to do. */
5893 	for (;;) {
5894 
5895 		status_attn_bits = sc->status_block->status_attn_bits;
5896 
5897 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5898 			BCE_PRINTF("Simulating unexpected status attention bit set.\n");
5899 			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5900 
5901 		/* Was it a link change interrupt? */
5902 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5903 			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5904 			bce_phy_intr(sc);
5905 
5906 		/* If any other attention is asserted then the chip is toast. */
5907 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5908 			(sc->status_block->status_attn_bits_ack &
5909 			~STATUS_ATTN_BITS_LINK_STATE))) {
5910 
5911 			DBRUN(sc->unexpected_attentions++);
5912 
5913 			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
5914 				__FILE__, __LINE__, sc->status_block->status_attn_bits);
5915 
5916 			DBRUNMSG(BCE_FATAL,
5917 				if (bce_debug_unexpected_attention == 0)
5918 					bce_breakpoint(sc));
5919 
5920 			bce_init_locked(sc);
5921 			goto bce_intr_exit;
5922 		}
5923 
5924 		/* Check for any completed RX frames. */
5925 		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
5926 			bce_rx_intr(sc);
5927 
5928 		/* Check for any completed TX frames. */
5929 		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
5930 			bce_tx_intr(sc);
5931 
5932 		/* Save the status block index value for use during the next interrupt. */
5933 		sc->last_status_idx = sc->status_block->status_idx;
5934 
5935 		/* Prevent speculative reads from getting ahead of the status block. */
5936 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
5937 			BUS_SPACE_BARRIER_READ);
5938 
5939 		/* If there's no work left then exit the interrupt service routine. */
5940 		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
5941 	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
5942 			break;
5943 
5944 	}
5945 
5946 	bus_dmamap_sync(sc->status_tag,	sc->status_map,
5947 	    BUS_DMASYNC_PREWRITE);
5948 
5949 	/* Re-enable interrupts. */
5950 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5951 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5952 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5953 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5954 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5955 
5956 	/* Handle any frames that arrived while handling the interrupt. */
5957 	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5958 		bce_start_locked(ifp);
5959 
5960 bce_intr_exit:
5961 	BCE_UNLOCK(sc);
5962 }
5963 
5964 
5965 /****************************************************************************/
5966 /* Programs the various packet receive modes (broadcast and multicast).     */
5967 /*                                                                          */
5968 /* Returns:                                                                 */
5969 /*   Nothing.                                                               */
5970 /****************************************************************************/
5971 static void
5972 bce_set_rx_mode(struct bce_softc *sc)
5973 {
5974 	struct ifnet *ifp;
5975 	struct ifmultiaddr *ifma;
5976 	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5977 	u32 rx_mode, sort_mode;
5978 	int h, i;
5979 
5980 	BCE_LOCK_ASSERT(sc);
5981 
5982 	ifp = sc->bce_ifp;
5983 
5984 	/* Initialize receive mode default settings. */
5985 	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5986 			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5987 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5988 
5989 	/*
5990 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5991 	 * be enabled.
5992 	 */
5993 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5994 		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5995 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5996 
5997 	/*
5998 	 * Check for promiscuous, all multicast, or selected
5999 	 * multicast address filtering.
6000 	 */
6001 	if (ifp->if_flags & IFF_PROMISC) {
6002 		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
6003 
6004 		/* Enable promiscuous mode. */
6005 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
6006 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
6007 	} else if (ifp->if_flags & IFF_ALLMULTI) {
6008 		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
6009 
6010 		/* Enable all multicast addresses. */
6011 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
6012 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
6013 		}
6014 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
6015 	} else {
6016 		/* Accept one or more multicast(s). */
6017 		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
6018 
6019 		IF_ADDR_LOCK(ifp);
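		/*
		 * The low byte of each address CRC selects one of 256 hash
		 * bins spread across eight 32-bit registers.  For example, a
		 * CRC byte of 0x5A sets bit 26 (0x5A & 0x1F) of hashes[2]
		 * ((0x5A & 0xE0) >> 5).
		 */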
6020 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
6021 			if (ifma->ifma_addr->sa_family != AF_LINK)
6022 				continue;
6023 			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
6024 			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
6025 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
6026 		}
6027 		IF_ADDR_UNLOCK(ifp);
6028 
6029 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
6030 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
6031 
6032 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
6033 	}
6034 
6035 	/* Only make changes if the receive mode has actually changed. */
6036 	if (rx_mode != sc->rx_mode) {
6037 		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
6038 			rx_mode);
6039 
6040 		sc->rx_mode = rx_mode;
6041 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
6042 	}
6043 
6044 	/* Disable and clear the existing sort before enabling a new sort. */
6045 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
6046 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
6047 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
6048 }
6049 
6050 
6051 /****************************************************************************/
6052 /* Called periodically to update statistics from the controller's           */
6053 /* statistics block.                                                        */
6054 /*                                                                          */
6055 /* Returns:                                                                 */
6056 /*   Nothing.                                                               */
6057 /****************************************************************************/
6058 static void
6059 bce_stats_update(struct bce_softc *sc)
6060 {
6061 	struct ifnet *ifp;
6062 	struct statistics_block *stats;
6063 
6064 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
6065 
6066 	ifp = sc->bce_ifp;
6067 
6068 	stats = (struct statistics_block *) sc->stats_block;
6069 
6070 	/*
6071 	 * Update the interface statistics from the
6072 	 * hardware statistics.
6073 	 */
6074 	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
6075 
6076 	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
6077 				      (u_long) stats->stat_EtherStatsOverrsizePkts +
6078 					  (u_long) stats->stat_IfInMBUFDiscards +
6079 					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
6080 					  (u_long) stats->stat_Dot3StatsFCSErrors;
6081 
6082 	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6083 					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
6084 					  (u_long) stats->stat_Dot3StatsLateCollisions;
6085 
6086 	/*
6087 	 * Certain controllers don't report
6088 	 * carrier sense errors correctly.
6089 	 * See errata E11_5708CA0_1165.
6090 	 */
6091 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
6092 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
6093 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
6094 
6095 	/*
6096 	 * Update the sysctl statistics from the
6097 	 * hardware statistics.
6098 	 */
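	/* The chip exports its 64-bit counters as _hi/_lo 32-bit halves. */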
6099 	sc->stat_IfHCInOctets =
6100 		((u64) stats->stat_IfHCInOctets_hi << 32) +
6101 		 (u64) stats->stat_IfHCInOctets_lo;
6102 
6103 	sc->stat_IfHCInBadOctets =
6104 		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
6105 		 (u64) stats->stat_IfHCInBadOctets_lo;
6106 
6107 	sc->stat_IfHCOutOctets =
6108 		((u64) stats->stat_IfHCOutOctets_hi << 32) +
6109 		 (u64) stats->stat_IfHCOutOctets_lo;
6110 
6111 	sc->stat_IfHCOutBadOctets =
6112 		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
6113 		 (u64) stats->stat_IfHCOutBadOctets_lo;
6114 
6115 	sc->stat_IfHCInUcastPkts =
6116 		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
6117 		 (u64) stats->stat_IfHCInUcastPkts_lo;
6118 
6119 	sc->stat_IfHCInMulticastPkts =
6120 		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
6121 		 (u64) stats->stat_IfHCInMulticastPkts_lo;
6122 
6123 	sc->stat_IfHCInBroadcastPkts =
6124 		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
6125 		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
6126 
6127 	sc->stat_IfHCOutUcastPkts =
6128 		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
6129 		 (u64) stats->stat_IfHCOutUcastPkts_lo;
6130 
6131 	sc->stat_IfHCOutMulticastPkts =
6132 		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
6133 		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
6134 
6135 	sc->stat_IfHCOutBroadcastPkts =
6136 		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
6137 		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
6138 
6139 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
6140 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
6141 
6142 	sc->stat_Dot3StatsCarrierSenseErrors =
6143 		stats->stat_Dot3StatsCarrierSenseErrors;
6144 
6145 	sc->stat_Dot3StatsFCSErrors =
6146 		stats->stat_Dot3StatsFCSErrors;
6147 
6148 	sc->stat_Dot3StatsAlignmentErrors =
6149 		stats->stat_Dot3StatsAlignmentErrors;
6150 
6151 	sc->stat_Dot3StatsSingleCollisionFrames =
6152 		stats->stat_Dot3StatsSingleCollisionFrames;
6153 
6154 	sc->stat_Dot3StatsMultipleCollisionFrames =
6155 		stats->stat_Dot3StatsMultipleCollisionFrames;
6156 
6157 	sc->stat_Dot3StatsDeferredTransmissions =
6158 		stats->stat_Dot3StatsDeferredTransmissions;
6159 
6160 	sc->stat_Dot3StatsExcessiveCollisions =
6161 		stats->stat_Dot3StatsExcessiveCollisions;
6162 
6163 	sc->stat_Dot3StatsLateCollisions =
6164 		stats->stat_Dot3StatsLateCollisions;
6165 
6166 	sc->stat_EtherStatsCollisions =
6167 		stats->stat_EtherStatsCollisions;
6168 
6169 	sc->stat_EtherStatsFragments =
6170 		stats->stat_EtherStatsFragments;
6171 
6172 	sc->stat_EtherStatsJabbers =
6173 		stats->stat_EtherStatsJabbers;
6174 
6175 	sc->stat_EtherStatsUndersizePkts =
6176 		stats->stat_EtherStatsUndersizePkts;
6177 
6178 	sc->stat_EtherStatsOverrsizePkts =
6179 		stats->stat_EtherStatsOverrsizePkts;
6180 
6181 	sc->stat_EtherStatsPktsRx64Octets =
6182 		stats->stat_EtherStatsPktsRx64Octets;
6183 
6184 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
6185 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
6186 
6187 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
6188 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
6189 
6190 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
6191 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
6192 
6193 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
6194 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
6195 
6196 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
6197 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
6198 
6199 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
6200 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
6201 
6202 	sc->stat_EtherStatsPktsTx64Octets =
6203 		stats->stat_EtherStatsPktsTx64Octets;
6204 
6205 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
6206 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
6207 
6208 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
6209 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
6210 
6211 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
6212 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
6213 
6214 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
6215 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
6216 
6217 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
6218 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
6219 
6220 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
6221 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
6222 
6223 	sc->stat_XonPauseFramesReceived =
6224 		stats->stat_XonPauseFramesReceived;
6225 
6226 	sc->stat_XoffPauseFramesReceived =
6227 		stats->stat_XoffPauseFramesReceived;
6228 
6229 	sc->stat_OutXonSent =
6230 		stats->stat_OutXonSent;
6231 
6232 	sc->stat_OutXoffSent =
6233 		stats->stat_OutXoffSent;
6234 
6235 	sc->stat_FlowControlDone =
6236 		stats->stat_FlowControlDone;
6237 
6238 	sc->stat_MacControlFramesReceived =
6239 		stats->stat_MacControlFramesReceived;
6240 
6241 	sc->stat_XoffStateEntered =
6242 		stats->stat_XoffStateEntered;
6243 
6244 	sc->stat_IfInFramesL2FilterDiscards =
6245 		stats->stat_IfInFramesL2FilterDiscards;
6246 
6247 	sc->stat_IfInRuleCheckerDiscards =
6248 		stats->stat_IfInRuleCheckerDiscards;
6249 
6250 	sc->stat_IfInFTQDiscards =
6251 		stats->stat_IfInFTQDiscards;
6252 
6253 	sc->stat_IfInMBUFDiscards =
6254 		stats->stat_IfInMBUFDiscards;
6255 
6256 	sc->stat_IfInRuleCheckerP4Hit =
6257 		stats->stat_IfInRuleCheckerP4Hit;
6258 
6259 	sc->stat_CatchupInRuleCheckerDiscards =
6260 		stats->stat_CatchupInRuleCheckerDiscards;
6261 
6262 	sc->stat_CatchupInFTQDiscards =
6263 		stats->stat_CatchupInFTQDiscards;
6264 
6265 	sc->stat_CatchupInMBUFDiscards =
6266 		stats->stat_CatchupInMBUFDiscards;
6267 
6268 	sc->stat_CatchupInRuleCheckerP4Hit =
6269 		stats->stat_CatchupInRuleCheckerP4Hit;
6270 
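	/*
	 * The firmware keeps a count of frames that arrived intact but were
	 * dropped because no RX buffer was available; read it back here
	 * (offset 0x120084, assumed to be in the COM processor scratch area)
	 * and export it as the "com_no_buffers" sysctl.
	 */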
6271 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
6272 
6273 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
6274 }
6275 
6276 
6277 /****************************************************************************/
6278 /* Periodic function to notify the bootcode that the driver is still        */
6279 /* present.                                                                 */
6280 /*                                                                          */
6281 /* Returns:                                                                 */
6282 /*   Nothing.                                                               */
6283 /****************************************************************************/
6284 static void
6285 bce_pulse(void *xsc)
6286 {
6287 	struct bce_softc *sc = xsc;
6288 	u32 msg;
6289 
6290 	DBPRINT(sc, BCE_EXCESSIVE_MISC, "pulse\n");
6291 
6292 	BCE_LOCK_ASSERT(sc);
6293 
6294 	/* Tell the firmware that the driver is still running. */
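	/*
	 * The bootcode watches this mailbox: the value written is irrelevant,
	 * what matters is that the sequence number keeps advancing (once per
	 * second, see the callout below), proving the driver is still alive.
	 */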
6295 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
6296 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
6297 
6298 	/* Schedule the next pulse. */
6299 	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
6300 
6301 	return;
6302 }
6303 
6304 
6305 /****************************************************************************/
6306 /* Periodic function to perform maintenance tasks.                          */
6307 /*                                                                          */
6308 /* Returns:                                                                 */
6309 /*   Nothing.                                                               */
6310 /****************************************************************************/
6311 static void
6312 bce_tick(void *xsc)
6313 {
6314 	struct bce_softc *sc = xsc;
6315 	struct mii_data *mii;
6316 	struct ifnet *ifp;
6317 
6318 	ifp = sc->bce_ifp;
6319 
6320 	BCE_LOCK_ASSERT(sc);
6321 
6322 	/* Schedule the next tick. */
6323 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6324 
6325 	/* Update the statistics from the hardware statistics block. */
6326 	bce_stats_update(sc);
6327 
6328 	/* Top off the receive and page chains. */
6329 	bce_fill_pg_chain(sc);
6330 	bce_fill_rx_chain(sc);
6331 
6332 	/* Check that the chip hasn't hung. */
6333 	bce_watchdog(sc);
6334 
6335 	/* If the link is already up then we're done. */
6336 	if (sc->bce_link)
6337 		goto bce_tick_exit;
6338 
6339 	mii = device_get_softc(sc->bce_miibus);
6340 	mii_tick(mii);
6341 
6342 	/* Check if the link has come up. */
6343 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
6344 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6345 		sc->bce_link++;
6346 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
6347 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
6348 		    bootverbose)
6349 			BCE_PRINTF("Gigabit link up\n");
6350 		/* Now that link is up, handle any outstanding TX traffic. */
6351 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
6352 			bce_start_locked(ifp);
6353 	}
6354 
6355 bce_tick_exit:
6356 	return;
6357 }
6358 
6359 
6360 #ifdef BCE_DEBUG
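/*
 * Note: most of the debug sysctl handlers below share one pattern: reading
 * the node returns a placeholder (-1) and writing the value 1 to it triggers
 * the corresponding dump, e.g. (assuming unit 0):
 *
 *	sysctl dev.bce.0.driver_state=1
 *
 * The reg_read and phy_read handlers instead take the register offset to
 * read as the value written.
 */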
6361 /****************************************************************************/
6362 /* Allows the driver state to be dumped through the sysctl interface.       */
6363 /*                                                                          */
6364 /* Returns:                                                                 */
6365 /*   0 for success, positive value for failure.                             */
6366 /****************************************************************************/
6367 static int
6368 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
6369 {
6370         int error;
6371         int result;
6372         struct bce_softc *sc;
6373 
6374         result = -1;
6375         error = sysctl_handle_int(oidp, &result, 0, req);
6376 
6377         if (error || !req->newptr)
6378                 return (error);
6379 
6380         if (result == 1) {
6381                 sc = (struct bce_softc *)arg1;
6382                 bce_dump_driver_state(sc);
6383         }
6384 
6385         return (error);
6386 }
6387 
6388 
6389 /****************************************************************************/
6390 /* Allows the hardware state to be dumped through the sysctl interface.     */
6391 /*                                                                          */
6392 /* Returns:                                                                 */
6393 /*   0 for success, positive value for failure.                             */
6394 /****************************************************************************/
6395 static int
6396 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
6397 {
6398         int error;
6399         int result;
6400         struct bce_softc *sc;
6401 
6402         result = -1;
6403         error = sysctl_handle_int(oidp, &result, 0, req);
6404 
6405         if (error || !req->newptr)
6406                 return (error);
6407 
6408         if (result == 1) {
6409                 sc = (struct bce_softc *)arg1;
6410                 bce_dump_hw_state(sc);
6411         }
6412 
6413         return (error);
6414 }
6415 
6416 
6417 /****************************************************************************/
6418 /* Allows the bootcode state to be dumped through the sysctl interface.     */
6419 /*                                                                          */
6420 /* Returns:                                                                 */
6421 /*   0 for success, positive value for failure.                             */
6422 /****************************************************************************/
6423 static int
6424 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
6425 {
6426         int error;
6427         int result;
6428         struct bce_softc *sc;
6429 
6430         result = -1;
6431         error = sysctl_handle_int(oidp, &result, 0, req);
6432 
6433         if (error || !req->newptr)
6434                 return (error);
6435 
6436         if (result == 1) {
6437                 sc = (struct bce_softc *)arg1;
6438                 bce_dump_bc_state(sc);
6439         }
6440 
6441         return (error);
6442 }
6443 
6444 
6445 /****************************************************************************/
6446 /* Provides a sysctl interface to allow dumping the RX chain.               */
6447 /*                                                                          */
6448 /* Returns:                                                                 */
6449 /*   0 for success, positive value for failure.                             */
6450 /****************************************************************************/
6451 static int
6452 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
6453 {
6454         int error;
6455         int result;
6456         struct bce_softc *sc;
6457 
6458         result = -1;
6459         error = sysctl_handle_int(oidp, &result, 0, req);
6460 
6461         if (error || !req->newptr)
6462                 return (error);
6463 
6464         if (result == 1) {
6465                 sc = (struct bce_softc *)arg1;
6466                 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
6467         }
6468 
6469         return (error);
6470 }
6471 
6472 
6473 /****************************************************************************/
6474 /* Provides a sysctl interface to allow dumping the TX chain.               */
6475 /*                                                                          */
6476 /* Returns:                                                                 */
6477 /*   0 for success, positive value for failure.                             */
6478 /****************************************************************************/
6479 static int
6480 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
6481 {
6482         int error;
6483         int result;
6484         struct bce_softc *sc;
6485 
6486         result = -1;
6487         error = sysctl_handle_int(oidp, &result, 0, req);
6488 
6489         if (error || !req->newptr)
6490                 return (error);
6491 
6492         if (result == 1) {
6493                 sc = (struct bce_softc *)arg1;
6494                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6495         }
6496 
6497         return (error);
6498 }
6499 
6500 
6501 /****************************************************************************/
6502 /* Provides a sysctl interface to allow dumping the page chain.             */
6503 /*                                                                          */
6504 /* Returns:                                                                 */
6505 /*   0 for success, positive value for failure.                             */
6506 /****************************************************************************/
6507 static int
6508 bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
6509 {
6510         int error;
6511         int result;
6512         struct bce_softc *sc;
6513 
6514         result = -1;
6515         error = sysctl_handle_int(oidp, &result, 0, req);
6516 
6517         if (error || !req->newptr)
6518                 return (error);
6519 
6520         if (result == 1) {
6521                 sc = (struct bce_softc *)arg1;
6522                 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
6523         }
6524 
6525         return (error);
6526 }
6527 
6528 
6529 /****************************************************************************/
6530 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
6531 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
6532 /*                                                                          */
6533 /* Returns:                                                                 */
6534 /*   0 for success, positive value for failure.                             */
6535 /****************************************************************************/
6536 static int
6537 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6538 {
6539 	struct bce_softc *sc;
6540 	int error;
6541 	u32 val, result;
6542 
6543 	result = -1;
6544 	error = sysctl_handle_int(oidp, &result, 0, req);
6545 	if (error || (req->newptr == NULL))
6546 		return (error);
6547 
6548 	/* Make sure the register is accessible. */
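	/*
	 * Offsets below 0x8000 are read directly through the memory-mapped
	 * register window; offsets up to 0x280000 are only reachable through
	 * the indirect access path (REG_RD_IND).
	 */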
6549 	if (result < 0x8000) {
6550 		sc = (struct bce_softc *)arg1;
6551 		val = REG_RD(sc, result);
6552 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6553 	} else if (result < 0x0280000) {
6554 		sc = (struct bce_softc *)arg1;
6555 		val = REG_RD_IND(sc, result);
6556 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6557 	}
6558 
6559 	return (error);
6560 }
6561 
6562 
6563 /****************************************************************************/
6564 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
6565 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
6566 /*                                                                          */
6567 /* Returns:                                                                 */
6568 /*   0 for success, positive value for failure.                             */
6569 /****************************************************************************/
6570 static int
6571 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
6572 {
6573 	struct bce_softc *sc;
6574 	device_t dev;
6575 	int error, result;
6576 	u16 val;
6577 
6578 	result = -1;
6579 	error = sysctl_handle_int(oidp, &result, 0, req);
6580 	if (error || (req->newptr == NULL))
6581 		return (error);
6582 
6583 	/* Make sure the register is accessible. */
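	/*
	 * Only the standard MII register space (0x00 - 0x1F, as defined by
	 * IEEE 802.3 clause 22) is exposed here.
	 */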
6584 	if (result < 0x20) {
6585 		sc = (struct bce_softc *)arg1;
6586 		dev = sc->bce_dev;
6587 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
6588 		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
6589 	}
6590 	return (error);
6591 }
6592 
6593 
6594 /****************************************************************************/
6595 /* Provides a sysctl interface to force the driver to dump state and        */
6596 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
6597 /*                                                                          */
6598 /* Returns:                                                                 */
6599 /*   0 for success, positive value for failure.                             */
6600 /****************************************************************************/
6601 static int
6602 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
6603 {
6604         int error;
6605         int result;
6606         struct bce_softc *sc;
6607 
6608         result = -1;
6609         error = sysctl_handle_int(oidp, &result, 0, req);
6610 
6611         if (error || !req->newptr)
6612                 return (error);
6613 
6614         if (result == 1) {
6615                 sc = (struct bce_softc *)arg1;
6616                 bce_breakpoint(sc);
6617         }
6618 
6619         return (error);
6620 }
6621 #endif
6622 
6623 
6624 /****************************************************************************/
6625 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6626 /*                                                                          */
6627 /* Returns:                                                                 */
6628 /*   Nothing.                                                               */
6629 /****************************************************************************/
6630 static void
6631 bce_add_sysctls(struct bce_softc *sc)
6632 {
6633 	struct sysctl_ctx_list *ctx;
6634 	struct sysctl_oid_list *children;
6635 
6636 	ctx = device_get_sysctl_ctx(sc->bce_dev);
6637 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
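	/*
	 * All nodes below are created under the device's own sysctl tree,
	 * i.e. dev.bce.<unit>.
	 */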
6638 
6639 #ifdef BCE_DEBUG
6640 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6641 		"rx_low_watermark",
6642 		CTLFLAG_RD, &sc->rx_low_watermark,
6643 		0, "Lowest level of free rx_bd's");
6644 
6645 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6646 		"rx_empty_count",
6647 		CTLFLAG_RD, &sc->rx_empty_count,
6648 		0, "Number of times the RX chain was empty");
6649 
6650 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6651 		"tx_hi_watermark",
6652 		CTLFLAG_RD, &sc->tx_hi_watermark,
6653 		0, "Highest level of used tx_bd's");
6654 
6655 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6656 		"tx_full_count",
6657 		CTLFLAG_RD, &sc->tx_full_count,
6658 		0, "Number of times the TX chain was full");
6659 
6660 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6661 		"l2fhdr_status_errors",
6662 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
6663 		0, "l2_fhdr status errors");
6664 
6665 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6666 		"unexpected_attentions",
6667 		CTLFLAG_RD, &sc->unexpected_attentions,
6668 		0, "Unexpected attentions");
6669 
6670 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6671 		"lost_status_block_updates",
6672 		CTLFLAG_RD, &sc->lost_status_block_updates,
6673 		0, "Lost status block updates");
6674 
6675 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6676 		"debug_mbuf_sim_alloc_failed",
6677 		CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed,
6678 		0, "Simulated mbuf cluster allocation failures");
6679 
6680 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6681 		"requested_tso_frames",
6682 		CTLFLAG_RD, &sc->requested_tso_frames,
6683 		0, "Number of TSO frames received");
6684 
6685 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6686 		"rx_interrupts",
6687 		CTLFLAG_RD, &sc->rx_interrupts,
6688 		0, "Number of RX interrupts");
6689 
6690 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6691 		"tx_interrupts",
6692 		CTLFLAG_RD, &sc->tx_interrupts,
6693 		0, "Number of TX interrupts");
6694 
6695 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6696 		"rx_intr_time",
6697 		CTLFLAG_RD, &sc->rx_intr_time,
6698 		"RX interrupt time");
6699 
6700 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6701 		"tx_intr_time",
6702 		CTLFLAG_RD, &sc->tx_intr_time,
6703 		"TX interrupt time");
6704 
6705 #endif
6706 
6707 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6708 		"mbuf_alloc_failed",
6709 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
6710 		0, "mbuf cluster allocation failures");
6711 
6712 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6713 		"tx_dma_map_failures",
6714 		CTLFLAG_RD, &sc->tx_dma_map_failures,
6715 		0, "tx dma mapping failures");
6716 
6717 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6718 		"stat_IfHCInOctets",
6719 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
6720 		"Bytes received");
6721 
6722 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6723 		"stat_IfHCInBadOctets",
6724 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6725 		"Bad bytes received");
6726 
6727 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6728 		"stat_IfHCOutOctets",
6729 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6730 		"Bytes sent");
6731 
6732 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6733 		"stat_IfHCOutBadOctets",
6734 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6735 		"Bad bytes sent");
6736 
6737 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6738 		"stat_IfHCInUcastPkts",
6739 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6740 		"Unicast packets received");
6741 
6742 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6743 		"stat_IfHCInMulticastPkts",
6744 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6745 		"Multicast packets received");
6746 
6747 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6748 		"stat_IfHCInBroadcastPkts",
6749 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6750 		"Broadcast packets received");
6751 
6752 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6753 		"stat_IfHCOutUcastPkts",
6754 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6755 		"Unicast packets sent");
6756 
6757 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6758 		"stat_IfHCOutMulticastPkts",
6759 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6760 		"Multicast packets sent");
6761 
6762 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6763 		"stat_IfHCOutBroadcastPkts",
6764 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6765 		"Broadcast packets sent");
6766 
6767 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6768 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6769 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6770 		0, "Internal MAC transmit errors");
6771 
6772 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6773 		"stat_Dot3StatsCarrierSenseErrors",
6774 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6775 		0, "Carrier sense errors");
6776 
6777 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6778 		"stat_Dot3StatsFCSErrors",
6779 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6780 		0, "Frame check sequence errors");
6781 
6782 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6783 		"stat_Dot3StatsAlignmentErrors",
6784 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6785 		0, "Alignment errors");
6786 
6787 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6788 		"stat_Dot3StatsSingleCollisionFrames",
6789 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6790 		0, "Single Collision Frames");
6791 
6792 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6793 		"stat_Dot3StatsMultipleCollisionFrames",
6794 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6795 		0, "Multiple Collision Frames");
6796 
6797 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6798 		"stat_Dot3StatsDeferredTransmissions",
6799 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6800 		0, "Deferred Transmissions");
6801 
6802 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6803 		"stat_Dot3StatsExcessiveCollisions",
6804 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6805 		0, "Excessive Collisions");
6806 
6807 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6808 		"stat_Dot3StatsLateCollisions",
6809 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6810 		0, "Late Collisions");
6811 
6812 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6813 		"stat_EtherStatsCollisions",
6814 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6815 		0, "Collisions");
6816 
6817 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6818 		"stat_EtherStatsFragments",
6819 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6820 		0, "Fragments");
6821 
6822 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6823 		"stat_EtherStatsJabbers",
6824 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6825 		0, "Jabbers");
6826 
6827 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6828 		"stat_EtherStatsUndersizePkts",
6829 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6830 		0, "Undersize packets");
6831 
6832 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6833 		"stat_EtherStatsOverrsizePkts",
6834 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6835 		0, "Oversize packets");
6836 
6837 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6838 		"stat_EtherStatsPktsRx64Octets",
6839 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6840 		0, "64 byte packets received");
6841 
6842 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6843 		"stat_EtherStatsPktsRx65Octetsto127Octets",
6844 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6845 		0, "65 to 127 byte packets received");
6846 
6847 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6848 		"stat_EtherStatsPktsRx128Octetsto255Octets",
6849 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6850 		0, "128 to 255 byte packets received");
6851 
6852 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6853 		"stat_EtherStatsPktsRx256Octetsto511Octets",
6854 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6855 		0, "256 to 511 byte packets received");
6856 
6857 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6858 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
6859 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6860 		0, "512 to 1023 byte packets received");
6861 
6862 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6863 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
6864 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6865 		0, "1024 to 1522 byte packets received");
6866 
6867 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6868 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
6869 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6870 		0, "1523 to 9022 byte packets received");
6871 
6872 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6873 		"stat_EtherStatsPktsTx64Octets",
6874 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6875 		0, "64 byte packets sent");
6876 
6877 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6878 		"stat_EtherStatsPktsTx65Octetsto127Octets",
6879 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6880 		0, "65 to 127 byte packets sent");
6881 
6882 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6883 		"stat_EtherStatsPktsTx128Octetsto255Octets",
6884 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6885 		0, "128 to 255 byte packets sent");
6886 
6887 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6888 		"stat_EtherStatsPktsTx256Octetsto511Octets",
6889 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6890 		0, "256 to 511 byte packets sent");
6891 
6892 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6893 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
6894 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6895 		0, "512 to 1023 byte packets sent");
6896 
6897 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6898 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
6899 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6900 		0, "1024 to 1522 byte packets sent");
6901 
6902 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6903 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6904 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6905 		0, "1523 to 9022 byte packets sent");
6906 
6907 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6908 		"stat_XonPauseFramesReceived",
6909 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6910 		0, "XON pause frames received");
6911 
6912 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6913 		"stat_XoffPauseFramesReceived",
6914 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6915 		0, "XOFF pause frames received");
6916 
6917 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6918 		"stat_OutXonSent",
6919 		CTLFLAG_RD, &sc->stat_OutXonSent,
6920 		0, "XON pause frames sent");
6921 
6922 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6923 		"stat_OutXoffSent",
6924 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6925 		0, "XOFF pause frames sent");
6926 
6927 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6928 		"stat_FlowControlDone",
6929 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6930 		0, "Flow control done");
6931 
6932 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6933 		"stat_MacControlFramesReceived",
6934 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6935 		0, "MAC control frames received");
6936 
6937 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6938 		"stat_XoffStateEntered",
6939 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6940 		0, "XOFF state entered");
6941 
6942 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6943 		"stat_IfInFramesL2FilterDiscards",
6944 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6945 		0, "Received L2 packets discarded");
6946 
6947 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6948 		"stat_IfInRuleCheckerDiscards",
6949 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6950 		0, "Received packets discarded by rule");
6951 
6952 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6953 		"stat_IfInFTQDiscards",
6954 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6955 		0, "Received packet FTQ discards");
6956 
6957 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6958 		"stat_IfInMBUFDiscards",
6959 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6960 		0, "Received packets discarded due to lack of controller buffer memory");
6961 
6962 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6963 		"stat_IfInRuleCheckerP4Hit",
6964 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6965 		0, "Received packets rule checker hits");
6966 
6967 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6968 		"stat_CatchupInRuleCheckerDiscards",
6969 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6970 		0, "Received packets discarded in Catchup path");
6971 
6972 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6973 		"stat_CatchupInFTQDiscards",
6974 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6975 		0, "Received packets discarded in FTQ in Catchup path");
6976 
6977 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6978 		"stat_CatchupInMBUFDiscards",
6979 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6980 		0, "Received packets discarded in controller buffer memory in Catchup path");
6981 
6982 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6983 		"stat_CatchupInRuleCheckerP4Hit",
6984 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6985 		0, "Received packets rule checker hits in Catchup path");
6986 
6987 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6988 		"com_no_buffers",
6989 		CTLFLAG_RD, &sc->com_no_buffers,
6990 		0, "Valid packets received but no RX buffers available");
6991 
6992 #ifdef BCE_DEBUG
6993 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6994 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6995 		(void *)sc, 0,
6996 		bce_sysctl_driver_state, "I", "Driver state information");
6997 
6998 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6999 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
7000 		(void *)sc, 0,
7001 		bce_sysctl_hw_state, "I", "Hardware state information");
7002 
7003 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7004 		"bc_state", CTLTYPE_INT | CTLFLAG_RW,
7005 		(void *)sc, 0,
7006 		bce_sysctl_bc_state, "I", "Bootcode state information");
7007 
7008 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7009 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
7010 		(void *)sc, 0,
7011 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
7012 
7013 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7014 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
7015 		(void *)sc, 0,
7016 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
7017 
7018 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7019 		"dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
7020 		(void *)sc, 0,
7021 		bce_sysctl_dump_pg_chain, "I", "Dump page chain");
7022 
7023 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7024 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
7025 		(void *)sc, 0,
7026 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
7027 
7028 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7029 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
7030 		(void *)sc, 0,
7031 		bce_sysctl_reg_read, "I", "Register read");
7032 
7033 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
7034 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
7035 		(void *)sc, 0,
7036 		bce_sysctl_phy_read, "I", "PHY register read");
7037 
7038 #endif
7039 
7040 }
7041 
7042 
7043 /****************************************************************************/
7044 /* BCE Debug Routines                                                       */
7045 /****************************************************************************/
7046 #ifdef BCE_DEBUG
7047 
7048 /****************************************************************************/
7049 /* Freezes the controller to allow for a cohesive state dump.               */
7050 /*                                                                          */
7051 /* Returns:                                                                 */
7052 /*   Nothing.                                                               */
7053 /****************************************************************************/
7054 static void
7055 bce_freeze_controller(struct bce_softc *sc)
7056 {
7057 	u32 val;
7058 	val = REG_RD(sc, BCE_MISC_COMMAND);
7059 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
7060 	REG_WR(sc, BCE_MISC_COMMAND, val);
7061 
7062 }
7063 
7064 
7065 /****************************************************************************/
7066 /* Unfreezes the controller after a freeze operation.  This may not always  */
7067 /* work and the controller will require a reset!                            */
7068 /*                                                                          */
7069 /* Returns:                                                                 */
7070 /*   Nothing.                                                               */
7071 /****************************************************************************/
7072 static void
7073 bce_unfreeze_controller(struct bce_softc *sc)
7074 {
7075 	u32 val;
7076 	val = REG_RD(sc, BCE_MISC_COMMAND);
7077 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
7078 	REG_WR(sc, BCE_MISC_COMMAND, val);
7079 
7080 }
7081 
7082 /****************************************************************************/
7083 /* Prints out information about an mbuf.                                    */
7084 /*                                                                          */
7085 /* Returns:                                                                 */
7086 /*   Nothing.                                                               */
7087 /****************************************************************************/
7088 static void
7089 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
7090 {
7091 	struct mbuf *mp = m;
7092 
7093 	if (m == NULL) {
7094 		BCE_PRINTF("mbuf: null pointer\n");
7095 		return;
7096 	}
7097 
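	/*
	 * The "%b" conversions below use the kernel printf(9) bit-field
	 * format: the leading character of the pattern gives the numeric
	 * output base (\20 == base 16) and each following <bit><name> pair
	 * labels a flag bit, counting from 1 at the least significant bit.
	 */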
7098 	while (mp) {
7099 		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
7100 			mp, mp->m_len, mp->m_flags,
7101 			"\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
7102 			mp->m_data);
7103 
7104 		if (mp->m_flags & M_PKTHDR) {
7105 			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
7106 				mp->m_pkthdr.len, mp->m_flags,
7107 				"\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
7108 				"\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
7109 				mp->m_pkthdr.csum_flags,
7110 				"\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
7111 				"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
7112 				"\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
7113 		}
7114 
7115 		if (mp->m_flags & M_EXT) {
7116 			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
7117 				mp->m_ext.ext_buf, mp->m_ext.ext_size);
7118 			switch (mp->m_ext.ext_type) {
7119 				case EXT_CLUSTER:    printf("EXT_CLUSTER\n"); break;
7120 				case EXT_SFBUF:      printf("EXT_SFBUF\n"); break;
7121 				case EXT_JUMBO9:     printf("EXT_JUMBO9\n"); break;
7122 				case EXT_JUMBO16:    printf("EXT_JUMBO16\n"); break;
7123 				case EXT_PACKET:     printf("EXT_PACKET\n"); break;
7124 				case EXT_MBUF:       printf("EXT_MBUF\n"); break;
7125 				case EXT_NET_DRV:    printf("EXT_NET_DRV\n"); break;
7126 				case EXT_MOD_TYPE:   printf("EXT_MOD_TYPE\n"); break;
7127 				case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
7128 				case EXT_EXTREF:     printf("EXT_EXTREF\n"); break;
7129 				default:             printf("UNKNOWN\n");
7130 			}
7131 		}
7132 
7133 		mp = mp->m_next;
7134 	}
7135 }
7136 
7137 
7138 /****************************************************************************/
7139 /* Prints out the mbufs in the TX mbuf chain.                               */
7140 /*                                                                          */
7141 /* Returns:                                                                 */
7142 /*   Nothing.                                                               */
7143 /****************************************************************************/
7144 static void
7145 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7146 {
7147 	struct mbuf *m;
7148 
7149 	BCE_PRINTF(
7150 		"----------------------------"
7151 		"  tx mbuf data  "
7152 		"----------------------------\n");
7153 
7154 	for (int i = 0; i < count; i++) {
7155 	 	m = sc->tx_mbuf_ptr[chain_prod];
7156 		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
7157 		bce_dump_mbuf(sc, m);
7158 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
7159 	}
7160 
7161 	BCE_PRINTF(
7162 		"----------------------------"
7163 		"----------------"
7164 		"----------------------------\n");
7165 }
7166 
7167 
7168 /****************************************************************************/
7169 /* Prints out the mbufs in the RX mbuf chain.                               */
7170 /*                                                                          */
7171 /* Returns:                                                                 */
7172 /*   Nothing.                                                               */
7173 /****************************************************************************/
7174 static void
7175 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7176 {
7177 	struct mbuf *m;
7178 
7179 	BCE_PRINTF(
7180 		"----------------------------"
7181 		"  rx mbuf data  "
7182 		"----------------------------\n");
7183 
7184 	for (int i = 0; i < count; i++) {
7185 	 	m = sc->rx_mbuf_ptr[chain_prod];
7186 		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
7187 		bce_dump_mbuf(sc, m);
7188 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
7189 	}
7190 
7191 
7192 	BCE_PRINTF(
7193 		"----------------------------"
7194 		"----------------"
7195 		"----------------------------\n");
7196 }
7197 
7198 
7199 /****************************************************************************/
7200 /* Prints out the mbufs in the mbuf page chain.                             */
7201 /*                                                                          */
7202 /* Returns:                                                                 */
7203 /*   Nothing.                                                               */
7204 /****************************************************************************/
7205 static void
7206 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7207 {
7208 	struct mbuf *m;
7209 
7210 	BCE_PRINTF(
7211 		"----------------------------"
7212 		"  pg mbuf data  "
7213 		"----------------------------\n");
7214 
7215 	for (int i = 0; i < count; i++) {
7216 	 	m = sc->pg_mbuf_ptr[chain_prod];
7217 		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
7218 		bce_dump_mbuf(sc, m);
7219 		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
7220 	}
7221 
7222 
7223 	BCE_PRINTF(
7224 		"----------------------------"
7225 		"----------------"
7226 		"----------------------------\n");
7227 }
7228 
7229 
7230 /****************************************************************************/
7231 /* Prints out a tx_bd structure.                                            */
7232 /*                                                                          */
7233 /* Returns:                                                                 */
7234 /*   Nothing.                                                               */
7235 /****************************************************************************/
7236 static void
7237 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
7238 {
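	/*
	 * The last tx_bd of each chain page is not a data descriptor; it
	 * holds the address of the next descriptor page, so it is reported
	 * separately below.
	 */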
7239 	if (idx > MAX_TX_BD)
7240 		/* Index out of range. */
7241 		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
7242 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
7243 		/* TX Chain page pointer. */
7244 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7245 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
7246 	else {
7247 			/* Normal tx_bd entry. */
7248 			BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7249 				"vlan tag= 0x%04X, flags = 0x%04X (", idx,
7250 				txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
7251 				txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
7252 				txbd->tx_bd_flags);
7253 
7254 			if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
7255 				printf(" CONN_FAULT");
7256 
7257 			if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
7258 				printf(" TCP_UDP_CKSUM");
7259 
7260 			if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
7261 				printf(" IP_CKSUM");
7262 
7263 			if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
7264 				printf("  VLAN");
7265 
7266 			if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
7267 				printf(" COAL_NOW");
7268 
7269 			if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
7270 				printf(" DONT_GEN_CRC");
7271 
7272 			if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
7273 				printf(" START");
7274 
7275 			if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
7276 				printf(" END");
7277 
7278 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
7279 				printf(" LSO");
7280 
7281 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
7282 				printf(" OPTION_WORD");
7283 
7284 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
7285 				printf(" FLAGS");
7286 
7287 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
7288 				printf(" SNAP");
7289 
7290 			printf(" )\n");
7291 		}
7292 
7293 }
7294 
7295 
7296 /****************************************************************************/
7297 /* Prints out a rx_bd structure.                                            */
7298 /*                                                                          */
7299 /* Returns:                                                                 */
7300 /*   Nothing.                                                               */
7301 /****************************************************************************/
7302 static void
7303 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
7304 {
7305 	if (idx > MAX_RX_BD)
7306 		/* Index out of range. */
7307 		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
7308 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
7309 		/* RX Chain page pointer. */
7310 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7311 			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
7312 	else
7313 		/* Normal rx_bd entry. */
7314 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7315 			"flags = 0x%08X\n", idx,
7316 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
7317 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
7318 }
7319 
7320 
7321 /****************************************************************************/
7322 /* Prints out a rx_bd structure in the page chain.                          */
7323 /*                                                                          */
7324 /* Returns:                                                                 */
7325 /*   Nothing.                                                               */
7326 /****************************************************************************/
7327 static void
7328 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
7329 {
7330 	if (idx > MAX_PG_BD)
7331 		/* Index out of range. */
7332 		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
7333 	else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
7334 		/* Page Chain page pointer. */
7335 		BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7336 			idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
7337 	else
7338 		/* Normal rx_bd entry. */
7339 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7340 			"flags = 0x%08X\n", idx,
7341 			pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
7342 			pgbd->rx_bd_len, pgbd->rx_bd_flags);
7343 }
7344 
7345 
7346 /****************************************************************************/
7347 /* Prints out a l2_fhdr structure.                                          */
7348 /*                                                                          */
7349 /* Returns:                                                                 */
7350 /*   Nothing.                                                               */
7351 /****************************************************************************/
7352 static void
7353 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
7354 {
7355 	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
7356 		"pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
7357 		"tcp_udp_xsum = 0x%04X\n", idx,
7358 		l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
7359 		l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
7360 		l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
7361 }
7362 
7363 
7364 /****************************************************************************/
7365 /* Prints out the L2 context memory.  (Only useful for CID 0 to 15.)        */
7366 /*                                                                          */
7367 /* Returns:                                                                 */
7368 /*   Nothing.                                                               */
7369 /****************************************************************************/
7370 static void
7371 bce_dump_ctx(struct bce_softc *sc, u16 cid)
7372 {
7373 	if (cid < TX_CID) {
7374 		BCE_PRINTF(
7375 			"----------------------------"
7376 			"    CTX Data    "
7377 			"----------------------------\n");
7378 
7379 		BCE_PRINTF("     0x%04X - (CID) Context ID\n", cid);
7380 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BDIDX) host rx producer index\n",
7381 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BDIDX));
7382 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BSEQ) host byte sequence\n",
7383 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BSEQ));
7384 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BSEQ) h/w byte sequence\n",
7385 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BSEQ));
7386 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_HI) h/w buffer descriptor address\n",
7387 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_HI));
7388 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_LO) h/w buffer descriptor address\n",
7389 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_LO));
7390 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDIDX) h/w rx consumer index\n",
7391 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDIDX));
7392 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_PG_BDIDX) host page producer index\n",
7393 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_PG_BDIDX));
7394 		BCE_PRINTF(" 0x%08X - (L2CTX_PG_BUF_SIZE) host rx_bd/page buffer size\n",
7395 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_PG_BUF_SIZE));
7396 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_HI) h/w page chain address\n",
7397 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_HI));
7398 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_LO) h/w page chain address\n",
7399 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_LO));
7400 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDIDX) h/w page consumer index\n",
7401 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDIDX));
7402 
7403 		BCE_PRINTF(
7404 			"----------------------------"
7405 			"----------------"
7406 			"----------------------------\n");
7407 	}
7408 }
7409 
7410 
7411 /****************************************************************************/
7412 /* Prints out the FTQ data.                                                 */
7413 /*                                                                          */
7414 /* Returns:                                                                 */
7415 /*   Nothing.                                                               */
7416 /****************************************************************************/
7417 static void
7418 bce_dump_ftqs(struct bce_softc *sc)
7419 {
7420 	u32 cmd, ctl, cur_depth, max_depth, valid_cnt;
7421 
7422 	BCE_PRINTF(
7423 		"----------------------------"
7424 		"    FTQ Data    "
7425 		"----------------------------\n");
7426 
7427 	BCE_PRINTF("  FTQ   Command    Control   Depth_Now  Max_Depth  Valid_Cnt\n");
7428 	BCE_PRINTF(" ----- ---------- ---------- ---------- ---------- ----------\n");
7429 
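	/*
	 * Every FTQ is decoded the same way: FTQ_CTL bits 31:22 hold the
	 * current queue depth and bits 21:12 the maximum depth, while the
	 * valid count is read back through one of the generic statistic
	 * counters selected via the BCE_HC_STAT_GEN_SEL_* registers.
	 */
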
7430 	/* Setup the generic statistic counters for the FTQ valid count. */
7431 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
7432 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
7433 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT  << 16) |
7434 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT   <<  8) |
7435 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT)));
7436 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_1,
7437 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT  << 24) |
7438 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT  << 16) |
7439 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT <<  8) |
7440 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT)));
7441 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_2,
7442 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT  << 24) |
7443 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT  << 16) |
7444 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT   <<  8) |
7445 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT)));
7446 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_3,
7447 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT   << 24) |
7448 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT  << 16) |
7449 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT  <<  8) |
7450 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT)));
7451 
7452 	cmd = REG_RD(sc, 0x23f8); /* RLUP_FTQ_CMD */
7453 	ctl = REG_RD(sc, 0x23fc); /* RLUP_FTQ_CTL */
7454 	cur_depth = (ctl & 0xFFC00000) >> 22;
7455 	max_depth = (ctl & 0x003FF000) >> 12;
7456 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
7457 	BCE_PRINTF(" RLUP  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7458 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7459 
7460 	cmd = REG_RD_IND(sc, 0xc53f8); /* RXP_FTQ_CMD */
7461 	ctl = REG_RD_IND(sc, 0xc53fc); /* RXP_FTQ_CTL */
7462 	cur_depth = (ctl & 0xFFC00000) >> 22;
7463 	max_depth = (ctl & 0x003FF000) >> 12;
7464 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
7465 	BCE_PRINTF(" RXP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7466 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7467 
7468 	cmd = REG_RD_IND(sc, 0xc53b8); /* RXP_CFTQ_CMD */
7469 	ctl = REG_RD_IND(sc, 0xc53bc); /* RXP_CFTQ_CTL */
7470 	cur_depth = (ctl & 0xFFC00000) >> 22;
7471 	max_depth = (ctl & 0x003FF000) >> 12;
7472 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
7473 	BCE_PRINTF(" RXPC  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7474 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7475 
7476 	cmd = REG_RD(sc, 0x2b78); /* RV2P_PFTQ_CMD */
7477 	ctl = REG_RD(sc, 0x2b7c); /* RV2P_PFTQ_CTL */
7478 	cur_depth = (ctl & 0xFFC00000) >> 22;
7479 	max_depth = (ctl & 0x003FF000) >> 12;
7480 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
7481 	BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7482 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7483 
7484 	cmd = REG_RD(sc, 0x2bf8); /* RV2P_MFTQ_CMD */
7485 	ctl = REG_RD(sc, 0x2bfc); /* RV2P_MFTQ_CTL */
7486 	cur_depth = (ctl & 0xFFC00000) >> 22;
7487 	max_depth = (ctl & 0x003FF000) >> 12;
7488 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
7489 	BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7490 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7491 
7492 	cmd = REG_RD(sc, 0x2bb8); /* RV2P_TFTQ_CMD */
7493 	ctl = REG_RD(sc, 0x2bbc); /* RV2P_TFTQ_CTL */
7494 	cur_depth = (ctl & 0xFFC00000) >> 22;
7495 	max_depth = (ctl & 0x003FF000) >> 12;
7496 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
7497 	BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7498 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7499 
7500 	cmd = REG_RD(sc, 0x2ff8); /* RDMA_FTQ_CMD */
7501 	ctl = REG_RD(sc, 0x2ffc); /* RDMA_FTQ_CTL */
7502 	cur_depth = (ctl & 0xFFC00000) >> 22;
7503 	max_depth = (ctl & 0x003FF000) >> 12;
7504 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
7505 	BCE_PRINTF(" RDMA  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7506 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7507 
7508 	cmd = REG_RD(sc, 0x4ff8); /* TSCH_FTQ_CMD */
7509 	ctl = REG_RD(sc, 0x4ffc); /* TSCH_FTQ_CTL */
7510 	cur_depth = (ctl & 0xFFC00000) >> 22;
7511 	max_depth = (ctl & 0x003FF000) >> 12;
7512 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
7513 	BCE_PRINTF(" TSCH  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7514 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7515 
7516 	cmd = REG_RD(sc, 0x53f8); /* TBDR_FTQ_CMD */
7517 	ctl = REG_RD(sc, 0x53fc); /* TBDR_FTQ_CTL */
7518 	cur_depth = (ctl & 0xFFC00000) >> 22;
7519 	max_depth = (ctl & 0x003FF000) >> 12;
7520 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
7521 	BCE_PRINTF(" TBDR  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7522 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7523 
7524 	cmd = REG_RD_IND(sc, 0x453f8); /* TXP_FTQ_CMD */
7525 	ctl = REG_RD_IND(sc, 0x453fc); /* TXP_FTQ_CTL */
7526 	cur_depth = (ctl & 0xFFC00000) >> 22;
7527 	max_depth = (ctl & 0x003FF000) >> 12;
7528 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
7529 	BCE_PRINTF(" TXP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7530 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7531 
7532 	cmd = REG_RD(sc, 0x5ff8); /* TDMA_FTQ_CMD */
7533 	ctl = REG_RD(sc, 0x5ffc); /* TDMA_FTQ_CTL */
7534 	cur_depth = (ctl & 0xFFC00000) >> 22;
7535 	max_depth = (ctl & 0x003FF000) >> 12;
7536 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
7537 	BCE_PRINTF(" TDMA  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7538 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7539 
7540 	cmd = REG_RD_IND(sc, 0x853f8); /* TPAT_FTQ_CMD */
7541 	ctl = REG_RD_IND(sc, 0x853fc); /* TPAT_FTQ_CTL */
7542 	cur_depth = (ctl & 0xFFC00000) >> 22;
7543 	max_depth = (ctl & 0x003FF000) >> 12;
7544 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
7545 	BCE_PRINTF(" TPAT  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7546 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7547 
7548 	cmd = REG_RD_IND(sc, 0x1c03f8); /* TAS_FTQ_CMD */
7549 	ctl = REG_RD_IND(sc, 0x1c03fc); /* TAS_FTQ_CTL */
7550 	cur_depth = (ctl & 0xFFC00000) >> 22;
7551 	max_depth = (ctl & 0x003FF000) >> 12;
7552 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
7553 	BCE_PRINTF(" TAS   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7554 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7555 
7556 	cmd = REG_RD_IND(sc, 0x105378); /* COM_COMXQ_FTQ_CMD */
7557 	ctl = REG_RD_IND(sc, 0x10537c); /* COM_COMXQ_FTQ_CTL */
7558 	cur_depth = (ctl & 0xFFC00000) >> 22;
7559 	max_depth = (ctl & 0x003FF000) >> 12;
7560 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
7561 	BCE_PRINTF(" COMX  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7562 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7563 
7564 	cmd = REG_RD_IND(sc, 0x1053b8); /* COM_COMTQ_FTQ_CMD */
7565 	ctl = REG_RD_IND(sc, 0x1053bc); /* COM_COMTQ_FTQ_CTL */
7566 	cur_depth = (ctl & 0xFFC00000) >> 22;
7567 	max_depth = (ctl & 0x003FF000) >> 12;
7568 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
7569 	BCE_PRINTF(" COMT  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7570 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7571 
7572 	cmd = REG_RD_IND(sc, 0x1053f8); /* COM_COMQ_FTQ_CMD */
7573 	ctl = REG_RD_IND(sc, 0x1053fc); /* COM_COMQ_FTQ_CTL */
7574 	cur_depth = (ctl & 0xFFC00000) >> 22;
7575 	max_depth = (ctl & 0x003FF000) >> 12;
7576 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
7577 	BCE_PRINTF(" COM   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7578 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7579 
7580 	/* Re-select the generic counters for the remaining FTQ valid counts. */
7581 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
7582 		 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT  << 16) |
7583 		  (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT  <<  8) |
7584 		  (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT)));
7585 
7586 	cmd = REG_RD_IND(sc, 0x1453f8); /* MCP_MCPQ_FTQ_CMD */
7587 	ctl = REG_RD_IND(sc, 0x1453fc); /* MCP_MCPQ_FTQ_CTL */
7588 	cur_depth = (ctl & 0xFFC00000) >> 22;
7589 	max_depth = (ctl & 0x003FF000) >> 12;
7590 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
7591 	BCE_PRINTF(" MCP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7592 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7593 
7594 	cmd = REG_RD_IND(sc, 0x1853f8); /* CP_CPQ_FTQ_CMD */
7595 	ctl = REG_RD_IND(sc, 0x1853fc); /* CP_CPQ_FTQ_CTL */
7596 	cur_depth = (ctl & 0xFFC00000) >> 22;
7597 	max_depth = (ctl & 0x003FF000) >> 12;
7598 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
7599 	BCE_PRINTF(" CP    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7600 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7601 
7602 	cmd = REG_RD(sc, 0x43f8); /* CSCH_CH_FTQ_CMD */
7603 	ctl = REG_RD(sc, 0x43fc); /* CSCH_CH_FTQ_CTL */
7604 	cur_depth = (ctl & 0xFFC00000) >> 22;
7605 	max_depth = (ctl & 0x003FF000) >> 12;
7606 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
7607 	BCE_PRINTF(" CS    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7608 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7609 
7610 	BCE_PRINTF(
7611 		"----------------------------"
7612 		"----------------"
7613 		"----------------------------\n");
7614 }
7615 
7616 
7617 /****************************************************************************/
7618 /* Prints out the TX chain.                                                 */
7619 /*                                                                          */
7620 /* Returns:                                                                 */
7621 /*   Nothing.                                                               */
7622 /****************************************************************************/
7623 static void
7624 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
7625 {
7626 	struct tx_bd *txbd;
7627 
7628 	/* First some info about the tx_bd chain structure. */
7629 	BCE_PRINTF(
7630 		"----------------------------"
7631 		"  tx_bd  chain  "
7632 		"----------------------------\n");
7633 
7634 	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
7635 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
7636 
7637 	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
7638 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
7639 
7640 	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
7641 
7642 	BCE_PRINTF(
7643 		"----------------------------"
7644 		"   tx_bd data   "
7645 		"----------------------------\n");
7646 
7647 	/* Now print out the tx_bd's themselves. */
7648 	for (int i = 0; i < count; i++) {
7649 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
7650 		bce_dump_txbd(sc, tx_prod, txbd);
7651 		tx_prod = NEXT_TX_BD(tx_prod);
7652 	}
7653 
7654 	BCE_PRINTF(
7655 		"----------------------------"
7656 		"----------------"
7657 		"----------------------------\n");
7658 }
7659 
7660 
7661 /****************************************************************************/
7662 /* Prints out the RX chain.                                                 */
7663 /*                                                                          */
7664 /* Returns:                                                                 */
7665 /*   Nothing.                                                               */
7666 /****************************************************************************/
7667 static void
7668 bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
7669 {
7670 	struct rx_bd *rxbd;
7671 
7672 	/* First some info about the rx_bd chain structure. */
7673 	BCE_PRINTF(
7674 		"----------------------------"
7675 		"  rx_bd  chain  "
7676 		"----------------------------\n");
7677 
7678 	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
7679 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
7680 
7681 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
7682 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
7683 
7684 	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
7685 
7686 	BCE_PRINTF(
7687 		"----------------------------"
7688 		"   rx_bd data   "
7689 		"----------------------------\n");
7690 
7691 	/* Now print out the rx_bd's themselves. */
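	/* RX_CHAIN_IDX() wraps the index once it passes the end of the chain. */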
7692 	for (int i = 0; i < count; i++) {
7693 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
7694 		bce_dump_rxbd(sc, rx_prod, rxbd);
7695 		rx_prod = RX_CHAIN_IDX(rx_prod + 1);
7696 	}
7697 
7698 	BCE_PRINTF(
7699 		"----------------------------"
7700 		"----------------"
7701 		"----------------------------\n");
7702 }
7703 
7704 
7705 /****************************************************************************/
7706 /* Prints out the page chain.                                               */
7707 /*                                                                          */
7708 /* Returns:                                                                 */
7709 /*   Nothing.                                                               */
7710 /****************************************************************************/
7711 static void
7712 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
7713 {
7714 	struct rx_bd *pgbd;
7715 
7716 	/* First some info about the page chain structure. */
7717 	BCE_PRINTF(
7718 		"----------------------------"
7719 		"   page chain   "
7720 		"----------------------------\n");
7721 
7722 	BCE_PRINTF("page size      = 0x%08X, pg chain pages        = 0x%08X\n",
7723 		(u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
7724 
7725 	BCE_PRINTF("pg_bd per page = 0x%08X, usable pg_bd per page = 0x%08X\n",
7726 		(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
7727 
7728 	BCE_PRINTF("total pg_bd    = 0x%08X, max_pg_bd             = 0x%08X\n",
7729 		(u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
7730 
7731 	BCE_PRINTF(
7732 		"----------------------------"
7733 		"   page data    "
7734 		"----------------------------\n");
7735 
7736 	/* Now print out the rx_bd's themselves. */
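	/*
	 * Page chain entries reuse struct rx_bd; PG_CHAIN_IDX() wraps the
	 * index once it passes the end of the page chain.
	 */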
7737 	for (int i = 0; i < count; i++) {
7738 		pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
7739 		bce_dump_pgbd(sc, pg_prod, pgbd);
7740 		pg_prod = PG_CHAIN_IDX(pg_prod + 1);
7741 	}
7742 
7743 	BCE_PRINTF(
7744 		"----------------------------"
7745 		"----------------"
7746 		"----------------------------\n");
7747 }
7748 
7749 
7750 /****************************************************************************/
7751 /* Prints out the status block from host memory.                            */
7752 /*                                                                          */
7753 /* Returns:                                                                 */
7754 /*   Nothing.                                                               */
7755 /****************************************************************************/
7756 static void
7757 bce_dump_status_block(struct bce_softc *sc)
7758 {
7759 	struct status_block *sblk;
7760 
7761 	sblk = sc->status_block;
7762 
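	/*
	 * The status block is DMA'd into host memory by the controller, so
	 * the values printed below are a snapshot rather than live register
	 * reads.
	 */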
7763 	BCE_PRINTF(
7764 		"----------------------------"
7765 		"  Status Block  "
7766 		"----------------------------\n");
7767 
7768 	BCE_PRINTF("    0x%08X - attn_bits\n",
7769 		sblk->status_attn_bits);
7770 
7771 	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
7772 		sblk->status_attn_bits_ack);
7773 
7774 	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
7775 		sblk->status_rx_quick_consumer_index0,
7776 		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
7777 
7778 	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
7779 		sblk->status_tx_quick_consumer_index0,
7780 		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
7781 
7782 	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
7783 
7784 	/* These indices are not used for normal L2 drivers. */
7785 	if (sblk->status_rx_quick_consumer_index1)
7786 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
7787 			sblk->status_rx_quick_consumer_index1,
7788 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
7789 
7790 	if (sblk->status_tx_quick_consumer_index1)
7791 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
7792 			sblk->status_tx_quick_consumer_index1,
7793 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
7794 
7795 	if (sblk->status_rx_quick_consumer_index2)
7796 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons2\n",
7797 			sblk->status_rx_quick_consumer_index2,
7798 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
7799 
7800 	if (sblk->status_tx_quick_consumer_index2)
7801 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
7802 			sblk->status_tx_quick_consumer_index2,
7803 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
7804 
7805 	if (sblk->status_rx_quick_consumer_index3)
7806 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
7807 			sblk->status_rx_quick_consumer_index3,
7808 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
7809 
7810 	if (sblk->status_tx_quick_consumer_index3)
7811 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
7812 			sblk->status_tx_quick_consumer_index3,
7813 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
7814 
7815 	if (sblk->status_rx_quick_consumer_index4 ||
7816 		sblk->status_rx_quick_consumer_index5)
7817 		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
7818 			sblk->status_rx_quick_consumer_index4,
7819 			sblk->status_rx_quick_consumer_index5);
7820 
7821 	if (sblk->status_rx_quick_consumer_index6 ||
7822 		sblk->status_rx_quick_consumer_index7)
7823 		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
7824 			sblk->status_rx_quick_consumer_index6,
7825 			sblk->status_rx_quick_consumer_index7);
7826 
7827 	if (sblk->status_rx_quick_consumer_index8 ||
7828 		sblk->status_rx_quick_consumer_index9)
7829 		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
7830 			sblk->status_rx_quick_consumer_index8,
7831 			sblk->status_rx_quick_consumer_index9);
7832 
7833 	if (sblk->status_rx_quick_consumer_index10 ||
7834 		sblk->status_rx_quick_consumer_index11)
7835 		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
7836 			sblk->status_rx_quick_consumer_index10,
7837 			sblk->status_rx_quick_consumer_index11);
7838 
7839 	if (sblk->status_rx_quick_consumer_index12 ||
7840 		sblk->status_rx_quick_consumer_index13)
7841 		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
7842 			sblk->status_rx_quick_consumer_index12,
7843 			sblk->status_rx_quick_consumer_index13);
7844 
7845 	if (sblk->status_rx_quick_consumer_index14 ||
7846 		sblk->status_rx_quick_consumer_index15)
7847 		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
7848 			sblk->status_rx_quick_consumer_index14,
7849 			sblk->status_rx_quick_consumer_index15);
7850 
7851 	if (sblk->status_completion_producer_index ||
7852 		sblk->status_cmd_consumer_index)
7853 		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
7854 			sblk->status_completion_producer_index,
7855 			sblk->status_cmd_consumer_index);
7856 
7857 	BCE_PRINTF(
7858 		"----------------------------"
7859 		"----------------"
7860 		"----------------------------\n");
7861 }
7862 
7863 
7864 /****************************************************************************/
7865 /* Prints out the statistics block from host memory.                        */
7866 /*                                                                          */
7867 /* Returns:                                                                 */
7868 /*   Nothing.                                                               */
7869 /****************************************************************************/
7870 static void
7871 bce_dump_stats_block(struct bce_softc *sc)
7872 {
7873 	struct statistics_block *sblk;
7874 
7875 	sblk = sc->stats_block;
7876 
7877 	BCE_PRINTF(
7878 		"---------------"
7879 		" Stats Block  (All Stats Not Shown Are 0) "
7880 		"---------------\n");
7881 
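	/*
	 * The 64-bit IfHC* counters are kept as hi/lo 32-bit word pairs and
	 * are printed as "hi:lo" below.
	 */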
7882 	if (sblk->stat_IfHCInOctets_hi
7883 		|| sblk->stat_IfHCInOctets_lo)
7884 		BCE_PRINTF("0x%08X:%08X : "
7885 			"IfHcInOctets\n",
7886 			sblk->stat_IfHCInOctets_hi,
7887 			sblk->stat_IfHCInOctets_lo);
7888 
7889 	if (sblk->stat_IfHCInBadOctets_hi
7890 		|| sblk->stat_IfHCInBadOctets_lo)
7891 		BCE_PRINTF("0x%08X:%08X : "
7892 			"IfHcInBadOctets\n",
7893 			sblk->stat_IfHCInBadOctets_hi,
7894 			sblk->stat_IfHCInBadOctets_lo);
7895 
7896 	if (sblk->stat_IfHCOutOctets_hi
7897 		|| sblk->stat_IfHCOutOctets_lo)
7898 		BCE_PRINTF("0x%08X:%08X : "
7899 			"IfHcOutOctets\n",
7900 			sblk->stat_IfHCOutOctets_hi,
7901 			sblk->stat_IfHCOutOctets_lo);
7902 
7903 	if (sblk->stat_IfHCOutBadOctets_hi
7904 		|| sblk->stat_IfHCOutBadOctets_lo)
7905 		BCE_PRINTF("0x%08X:%08X : "
7906 			"IfHcOutBadOctets\n",
7907 			sblk->stat_IfHCOutBadOctets_hi,
7908 			sblk->stat_IfHCOutBadOctets_lo);
7909 
7910 	if (sblk->stat_IfHCInUcastPkts_hi
7911 		|| sblk->stat_IfHCInUcastPkts_lo)
7912 		BCE_PRINTF("0x%08X:%08X : "
7913 			"IfHcInUcastPkts\n",
7914 			sblk->stat_IfHCInUcastPkts_hi,
7915 			sblk->stat_IfHCInUcastPkts_lo);
7916 
7917 	if (sblk->stat_IfHCInBroadcastPkts_hi
7918 		|| sblk->stat_IfHCInBroadcastPkts_lo)
7919 		BCE_PRINTF("0x%08X:%08X : "
7920 			"IfHcInBroadcastPkts\n",
7921 			sblk->stat_IfHCInBroadcastPkts_hi,
7922 			sblk->stat_IfHCInBroadcastPkts_lo);
7923 
7924 	if (sblk->stat_IfHCInMulticastPkts_hi
7925 		|| sblk->stat_IfHCInMulticastPkts_lo)
7926 		BCE_PRINTF("0x%08X:%08X : "
7927 			"IfHcInMulticastPkts\n",
7928 			sblk->stat_IfHCInMulticastPkts_hi,
7929 			sblk->stat_IfHCInMulticastPkts_lo);
7930 
7931 	if (sblk->stat_IfHCOutUcastPkts_hi
7932 		|| sblk->stat_IfHCOutUcastPkts_lo)
7933 		BCE_PRINTF("0x%08X:%08X : "
7934 			"IfHcOutUcastPkts\n",
7935 			sblk->stat_IfHCOutUcastPkts_hi,
7936 			sblk->stat_IfHCOutUcastPkts_lo);
7937 
7938 	if (sblk->stat_IfHCOutBroadcastPkts_hi
7939 		|| sblk->stat_IfHCOutBroadcastPkts_lo)
7940 		BCE_PRINTF("0x%08X:%08X : "
7941 			"IfHcOutBroadcastPkts\n",
7942 			sblk->stat_IfHCOutBroadcastPkts_hi,
7943 			sblk->stat_IfHCOutBroadcastPkts_lo);
7944 
7945 	if (sblk->stat_IfHCOutMulticastPkts_hi
7946 		|| sblk->stat_IfHCOutMulticastPkts_lo)
7947 		BCE_PRINTF("0x%08X:%08X : "
7948 			"IfHcOutMulticastPkts\n",
7949 			sblk->stat_IfHCOutMulticastPkts_hi,
7950 			sblk->stat_IfHCOutMulticastPkts_lo);
7951 
7952 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
7953 		BCE_PRINTF("         0x%08X : "
7954 			"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
7955 			sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
7956 
7957 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
7958 		BCE_PRINTF("         0x%08X : Dot3StatsCarrierSenseErrors\n",
7959 			sblk->stat_Dot3StatsCarrierSenseErrors);
7960 
7961 	if (sblk->stat_Dot3StatsFCSErrors)
7962 		BCE_PRINTF("         0x%08X : Dot3StatsFCSErrors\n",
7963 			sblk->stat_Dot3StatsFCSErrors);
7964 
7965 	if (sblk->stat_Dot3StatsAlignmentErrors)
7966 		BCE_PRINTF("         0x%08X : Dot3StatsAlignmentErrors\n",
7967 			sblk->stat_Dot3StatsAlignmentErrors);
7968 
7969 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
7970 		BCE_PRINTF("         0x%08X : Dot3StatsSingleCollisionFrames\n",
7971 			sblk->stat_Dot3StatsSingleCollisionFrames);
7972 
7973 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
7974 		BCE_PRINTF("         0x%08X : Dot3StatsMultipleCollisionFrames\n",
7975 			sblk->stat_Dot3StatsMultipleCollisionFrames);
7976 
7977 	if (sblk->stat_Dot3StatsDeferredTransmissions)
7978 		BCE_PRINTF("         0x%08X : Dot3StatsDeferredTransmissions\n",
7979 			sblk->stat_Dot3StatsDeferredTransmissions);
7980 
7981 	if (sblk->stat_Dot3StatsExcessiveCollisions)
7982 		BCE_PRINTF("         0x%08X : Dot3StatsExcessiveCollisions\n",
7983 			sblk->stat_Dot3StatsExcessiveCollisions);
7984 
7985 	if (sblk->stat_Dot3StatsLateCollisions)
7986 		BCE_PRINTF("         0x%08X : Dot3StatsLateCollisions\n",
7987 			sblk->stat_Dot3StatsLateCollisions);
7988 
7989 	if (sblk->stat_EtherStatsCollisions)
7990 		BCE_PRINTF("         0x%08X : EtherStatsCollisions\n",
7991 			sblk->stat_EtherStatsCollisions);
7992 
7993 	if (sblk->stat_EtherStatsFragments)
7994 		BCE_PRINTF("         0x%08X : EtherStatsFragments\n",
7995 			sblk->stat_EtherStatsFragments);
7996 
7997 	if (sblk->stat_EtherStatsJabbers)
7998 		BCE_PRINTF("         0x%08X : EtherStatsJabbers\n",
7999 			sblk->stat_EtherStatsJabbers);
8000 
8001 	if (sblk->stat_EtherStatsUndersizePkts)
8002 		BCE_PRINTF("         0x%08X : EtherStatsUndersizePkts\n",
8003 			sblk->stat_EtherStatsUndersizePkts);
8004 
8005 	if (sblk->stat_EtherStatsOverrsizePkts)
8006 		BCE_PRINTF("         0x%08X : EtherStatsOverrsizePkts\n",
8007 			sblk->stat_EtherStatsOverrsizePkts);
8008 
8009 	if (sblk->stat_EtherStatsPktsRx64Octets)
8010 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx64Octets\n",
8011 			sblk->stat_EtherStatsPktsRx64Octets);
8012 
8013 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
8014 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
8015 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
8016 
8017 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
8018 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
8019 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
8020 
8021 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
8022 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
8023 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
8024 
8025 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
8026 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
8027 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
8028 
8029 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
8030 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
8031 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
8032 
8033 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
8034 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
8035 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
8036 
8037 	if (sblk->stat_EtherStatsPktsTx64Octets)
8038 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx64Octets\n",
8039 			sblk->stat_EtherStatsPktsTx64Octets);
8040 
8041 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
8042 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
8043 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
8044 
8045 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
8046 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
8047 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
8048 
8049 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
8050 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
8051 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
8052 
8053 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
8054 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
8055 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
8056 
8057 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
8058 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
8059 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
8060 
8061 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
8062 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
8063 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
8064 
8065 	if (sblk->stat_XonPauseFramesReceived)
8066 		BCE_PRINTF("         0x%08X : XonPauseFramesReceived\n",
8067 			sblk->stat_XonPauseFramesReceived);
8068 
8069 	if (sblk->stat_XoffPauseFramesReceived)
8070 		BCE_PRINTF("         0x%08X : XoffPauseFramesReceived\n",
8071 			sblk->stat_XoffPauseFramesReceived);
8072 
8073 	if (sblk->stat_OutXonSent)
8074 		BCE_PRINTF("         0x%08X : OutXonSent\n",
8075 			sblk->stat_OutXonSent);
8076 
8077 	if (sblk->stat_OutXoffSent)
8078 		BCE_PRINTF("         0x%08X : OutXoffSent\n",
8079 			sblk->stat_OutXoffSent);
8080 
8081 	if (sblk->stat_FlowControlDone)
8082 		BCE_PRINTF("         0x%08X : FlowControlDone\n",
8083 			sblk->stat_FlowControlDone);
8084 
8085 	if (sblk->stat_MacControlFramesReceived)
8086 		BCE_PRINTF("         0x%08X : MacControlFramesReceived\n",
8087 			sblk->stat_MacControlFramesReceived);
8088 
8089 	if (sblk->stat_XoffStateEntered)
8090 		BCE_PRINTF("         0x%08X : XoffStateEntered\n",
8091 			sblk->stat_XoffStateEntered);
8092 
8093 	if (sblk->stat_IfInFramesL2FilterDiscards)
8094 		BCE_PRINTF("         0x%08X : IfInFramesL2FilterDiscards\n",
8095 			sblk->stat_IfInFramesL2FilterDiscards);
8096 
8097 	if (sblk->stat_IfInRuleCheckerDiscards)
8098 		BCE_PRINTF("         0x%08X : IfInRuleCheckerDiscards\n",
8099 			sblk->stat_IfInRuleCheckerDiscards);
8100 
8101 	if (sblk->stat_IfInFTQDiscards)
8102 		BCE_PRINTF("         0x%08X : IfInFTQDiscards\n",
8103 			sblk->stat_IfInFTQDiscards);
8104 
8105 	if (sblk->stat_IfInMBUFDiscards)
8106 		BCE_PRINTF("         0x%08X : IfInMBUFDiscards\n",
8107 			sblk->stat_IfInMBUFDiscards);
8108 
8109 	if (sblk->stat_IfInRuleCheckerP4Hit)
8110 		BCE_PRINTF("         0x%08X : IfInRuleCheckerP4Hit\n",
8111 			sblk->stat_IfInRuleCheckerP4Hit);
8112 
8113 	if (sblk->stat_CatchupInRuleCheckerDiscards)
8114 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerDiscards\n",
8115 			sblk->stat_CatchupInRuleCheckerDiscards);
8116 
8117 	if (sblk->stat_CatchupInFTQDiscards)
8118 		BCE_PRINTF("         0x%08X : CatchupInFTQDiscards\n",
8119 			sblk->stat_CatchupInFTQDiscards);
8120 
8121 	if (sblk->stat_CatchupInMBUFDiscards)
8122 		BCE_PRINTF("         0x%08X : CatchupInMBUFDiscards\n",
8123 			sblk->stat_CatchupInMBUFDiscards);
8124 
8125 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
8126 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerP4Hit\n",
8127 			sblk->stat_CatchupInRuleCheckerP4Hit);
8128 
8129 	BCE_PRINTF(
8130 		"----------------------------"
8131 		"----------------"
8132 		"----------------------------\n");
8133 }
8134 
8135 
8136 /****************************************************************************/
8137 /* Prints out a summary of the driver state.                                */
8138 /*                                                                          */
8139 /* Returns:                                                                 */
8140 /*   Nothing.                                                               */
8141 /****************************************************************************/
8142 static void
8143 bce_dump_driver_state(struct bce_softc *sc)
8144 {
8145 	u32 val_hi, val_lo;
8146 
8147 	BCE_PRINTF(
8148 		"-----------------------------"
8149 		" Driver State "
8150 		"-----------------------------\n");
8151 
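	/*
	 * Virtual addresses are split into high and low 32-bit halves with
	 * BCE_ADDR_HI()/BCE_ADDR_LO() so the output format is the same on
	 * 32-bit and 64-bit hosts.
	 */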
8152 	val_hi = BCE_ADDR_HI(sc);
8153 	val_lo = BCE_ADDR_LO(sc);
8154 	BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
8155 		val_hi, val_lo);
8156 
8157 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
8158 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
8159 	BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
8160 		val_hi, val_lo);
8161 
8162 	val_hi = BCE_ADDR_HI(sc->status_block);
8163 	val_lo = BCE_ADDR_LO(sc->status_block);
8164 	BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
8165 		val_hi, val_lo);
8166 
8167 	val_hi = BCE_ADDR_HI(sc->stats_block);
8168 	val_lo = BCE_ADDR_LO(sc->stats_block);
8169 	BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
8170 		val_hi, val_lo);
8171 
8172 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
8173 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
8174 	BCE_PRINTF(
8175 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual address\n",
8176 		val_hi, val_lo);
8177 
8178 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
8179 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
8180 	BCE_PRINTF(
8181 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
8182 		val_hi, val_lo);
8183 
8184 	val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
8185 	val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
8186 	BCE_PRINTF(
8187 		"0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
8188 		val_hi, val_lo);
8189 
8190 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
8191 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
8192 	BCE_PRINTF(
8193 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
8194 		val_hi, val_lo);
8195 
8196 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
8197 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
8198 	BCE_PRINTF(
8199 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
8200 		val_hi, val_lo);
8201 
8202 	val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
8203 	val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
8204 	BCE_PRINTF(
8205 		"0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
8206 		val_hi, val_lo);
8207 
8208 	BCE_PRINTF("         0x%08X - (sc->interrupts_generated) h/w intrs\n",
8209 		sc->interrupts_generated);
8210 
8211 	BCE_PRINTF("         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
8212 		sc->rx_interrupts);
8213 
8214 	BCE_PRINTF("         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
8215 		sc->tx_interrupts);
8216 
8217 	BCE_PRINTF("         0x%08X - (sc->last_status_idx) status block index\n",
8218 		sc->last_status_idx);
8219 
8220 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
8221 		sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
8222 
8223 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
8224 		sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
8225 
8226 	BCE_PRINTF("         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
8227 		sc->tx_prod_bseq);
8228 
8229 	BCE_PRINTF("         0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
8230 		sc->debug_tx_mbuf_alloc);
8231 
8232 	BCE_PRINTF("         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
8233 		sc->used_tx_bd);
8234 
8235 	BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
8236 		sc->tx_hi_watermark, sc->max_tx_bd);
8237 
8238 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
8239 		sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
8240 
8241 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
8242 		sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
8243 
8244 	BCE_PRINTF("         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
8245 		sc->rx_prod_bseq);
8246 
8247 	BCE_PRINTF("         0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
8248 		sc->debug_rx_mbuf_alloc);
8249 
8250 	BCE_PRINTF("         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
8251 		sc->free_rx_bd);
8252 
8253 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
8254 		sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
8255 
8256 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
8257 		sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
8258 
8259 	BCE_PRINTF("         0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
8260 		sc->debug_pg_mbuf_alloc);
8261 
8262 	BCE_PRINTF("         0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
8263 		sc->free_pg_bd);
8264 
8265 	BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
8266 		sc->pg_low_watermark, sc->max_pg_bd);
8267 
8268 	BCE_PRINTF("         0x%08X - (sc->mbuf_alloc_failed) "
8269 		"mbuf alloc failures\n",
8270 		sc->mbuf_alloc_failed);
8271 
8272 	BCE_PRINTF("         0x%08X - (sc->debug_mbuf_sim_alloc_failed) "
8273 		"simulated mbuf alloc failures\n",
8274 		sc->debug_mbuf_sim_alloc_failed);
8275 
8276 	BCE_PRINTF(
8277 		"----------------------------"
8278 		"----------------"
8279 		"----------------------------\n");
8280 }
8281 
8282 
8283 /****************************************************************************/
8284 /* Prints out the hardware state through a summary of important registers,  */
8285 /* followed by a complete register dump.                                    */
8286 /*                                                                          */
8287 /* Returns:                                                                 */
8288 /*   Nothing.                                                               */
8289 /****************************************************************************/
8290 static void
8291 bce_dump_hw_state(struct bce_softc *sc)
8292 {
8293 	u32 val;
8294 
8295 	BCE_PRINTF(
8296 		"----------------------------"
8297 		" Hardware State "
8298 		"----------------------------\n");
8299 
8300 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
8301 
8302 	val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
8303 	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
8304 		val, BCE_MISC_ENABLE_STATUS_BITS);
8305 
8306 	val = REG_RD(sc, BCE_DMA_STATUS);
8307 	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
8308 
8309 	val = REG_RD(sc, BCE_CTX_STATUS);
8310 	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
8311 
8312 	val = REG_RD(sc, BCE_EMAC_STATUS);
8313 	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
8314 
8315 	val = REG_RD(sc, BCE_RPM_STATUS);
8316 	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
8317 
8318 	val = REG_RD(sc, 0x2004);
8319 	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
8320 
8321 	val = REG_RD(sc, BCE_RV2P_STATUS);
8322 	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
8323 
8324 	val = REG_RD(sc, 0x2c04);
8325 	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
8326 
8327 	val = REG_RD(sc, BCE_TBDR_STATUS);
8328 	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
8329 
8330 	val = REG_RD(sc, BCE_TDMA_STATUS);
8331 	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
8332 
8333 	val = REG_RD(sc, BCE_HC_STATUS);
8334 	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
8335 
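	/* The per-processor CPU state registers are read indirectly. */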
8336 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
8337 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
8338 
8339 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
8340 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
8341 
8342 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
8343 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
8344 
8345 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
8346 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
8347 
8348 	val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
8349 	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
8350 
8351 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
8352 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
8353 
8354 	BCE_PRINTF(
8355 		"----------------------------"
8356 		"----------------"
8357 		"----------------------------\n");
8358 
8359 	BCE_PRINTF(
8360 		"----------------------------"
8361 		" Register  Dump "
8362 		"----------------------------\n");
8363 
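	/* Dump the directly mapped register space, four registers per line. */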
8364 	for (int i = 0x400; i < 0x8000; i += 0x10) {
8365 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8366 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
8367 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
8368 	}
8369 
8370 	BCE_PRINTF(
8371 		"----------------------------"
8372 		"----------------"
8373 		"----------------------------\n");
8374 }
8375 
8376 
8377 /****************************************************************************/
8378 /* Prints out the bootcode state.                                           */
8379 /*                                                                          */
8380 /* Returns:                                                                 */
8381 /*   Nothing.                                                               */
8382 /****************************************************************************/
8383 static void
8384 bce_dump_bc_state(struct bce_softc *sc)
8385 {
8386 	u32 val;
8387 
8388 	BCE_PRINTF(
8389 		"----------------------------"
8390 		" Bootcode State "
8391 		"----------------------------\n");
8392 
8393 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
8394 
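	/*
	 * The bootcode state words live in the shared memory window and are
	 * read indirectly relative to bce_shmem_base.
	 */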
8395 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
8396 	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
8397 		val, BCE_BC_RESET_TYPE);
8398 
8399 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
8400 	BCE_PRINTF("0x%08X - (0x%06X) state\n",
8401 		val, BCE_BC_STATE);
8402 
8403 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
8404 	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
8405 		val, BCE_BC_CONDITION);
8406 
8407 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
8408 	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
8409 		val, BCE_BC_STATE_DEBUG_CMD);
8410 
8411 	BCE_PRINTF(
8412 		"----------------------------"
8413 		"----------------"
8414 		"----------------------------\n");
8415 }
8416 
8417 
8418 /****************************************************************************/
8419 /* Prints out the TXP state.                                                */
8420 /*                                                                          */
8421 /* Returns:                                                                 */
8422 /*   Nothing.                                                               */
8423 /****************************************************************************/
8424 static void
8425 bce_dump_txp_state(struct bce_softc *sc)
8426 {
8427 	u32 val1;
8428 
8429 	BCE_PRINTF(
8430 		"----------------------------"
8431 		"   TXP  State   "
8432 		"----------------------------\n");
8433 
8434 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
8435 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val1, BCE_TXP_CPU_MODE);
8436 
8437 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
8438 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
8439 
8440 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
8441 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val1, BCE_TXP_CPU_EVENT_MASK);
8442 
8443 	BCE_PRINTF(
8444 		"----------------------------"
8445 		" Register  Dump "
8446 		"----------------------------\n");
8447 
8448 	for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
8449 		/* Skip the big blank spaces */
8450 		if (i < 0x45400 || i > 0x5ffff)
8451 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8452 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8453 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8454 	}
8455 
8456 	BCE_PRINTF(
8457 		"----------------------------"
8458 		"----------------"
8459 		"----------------------------\n");
8460 }
8461 
8462 
8463 /****************************************************************************/
8464 /* Prints out the RXP state.                                                */
8465 /*                                                                          */
8466 /* Returns:                                                                 */
8467 /*   Nothing.                                                               */
8468 /****************************************************************************/
8469 static void
8470 bce_dump_rxp_state(struct bce_softc *sc)
8471 {
8472 	u32 val1;
8473 
8474 	BCE_PRINTF(
8475 		"----------------------------"
8476 		"   RXP  State   "
8477 		"----------------------------\n");
8478 
8479 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
8480 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val1, BCE_RXP_CPU_MODE);
8481 
8482 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
8483 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
8484 
8485 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
8486 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val1, BCE_RXP_CPU_EVENT_MASK);
8487 
8488 	BCE_PRINTF(
8489 		"----------------------------"
8490 		" Register  Dump "
8491 		"----------------------------\n");
8492 
8493 	for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
8494 		/* Skip the big blank spaces */
8495 		if (i < 0xc5400 || i > 0xdffff)
8496 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8497 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8498 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8499 	}
8500 
8501 	BCE_PRINTF(
8502 		"----------------------------"
8503 		"----------------"
8504 		"----------------------------\n");
8505 }
8506 
8507 
8508 /****************************************************************************/
8509 /* Prints out the TPAT state.                                               */
8510 /*                                                                          */
8511 /* Returns:                                                                 */
8512 /*   Nothing.                                                               */
8513 /****************************************************************************/
8514 static void
8515 bce_dump_tpat_state(struct bce_softc *sc)
8516 {
8517 	u32 val1;
8518 
8519 	BCE_PRINTF(
8520 		"----------------------------"
8521 		"   TPAT State   "
8522 		"----------------------------\n");
8523 
8524 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
8525 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val1, BCE_TPAT_CPU_MODE);
8526 
8527 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
8528 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
8529 
8530 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
8531 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val1, BCE_TPAT_CPU_EVENT_MASK);
8532 
8533 	BCE_PRINTF(
8534 		"----------------------------"
8535 		" Register  Dump "
8536 		"----------------------------\n");
8537 
8538 	for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
8539 		/* Skip the big blank spaces */
8540 		if (i < 0x85400 || i > 0x9ffff)
8541 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8542 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8543 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8544 	}
8545 
8546 	BCE_PRINTF(
8547 		"----------------------------"
8548 		"----------------"
8549 		"----------------------------\n");
8550 }
8551 
8552 
8553 /* ToDo: Add CP and COM processor state dumps. */
8554 
8555 
8556 /****************************************************************************/
8557 /* Prints out the driver state and then enters the debugger.                */
8558 /*                                                                          */
8559 /* Returns:                                                                 */
8560 /*   Nothing.                                                               */
8561 /****************************************************************************/
8562 static void
8563 bce_breakpoint(struct bce_softc *sc)
8564 {
8565 
8566 	/*
8567 	 * Unreachable code to silence compiler warnings
8568 	 * about unused functions.
8569 	 */
8570 	if (0) {
8571 		bce_freeze_controller(sc);
8572 		bce_unfreeze_controller(sc);
8573 		bce_dump_txbd(sc, 0, NULL);
8574 		bce_dump_rxbd(sc, 0, NULL);
8575 		bce_dump_pgbd(sc, 0, NULL);
8576 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
8577 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
8578 		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
8579 		bce_dump_l2fhdr(sc, 0, NULL);
8580 		bce_dump_ctx(sc, RX_CID);
8581 		bce_dump_ftqs(sc);
8582 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
8583 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
8584 		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
8585 		bce_dump_status_block(sc);
8586 		bce_dump_stats_block(sc);
8587 		bce_dump_driver_state(sc);
8588 		bce_dump_hw_state(sc);
8589 		bce_dump_bc_state(sc);
8590 		bce_dump_txp_state(sc);
8591 		bce_dump_rxp_state(sc);
8592 		bce_dump_tpat_state(sc);
8593 	}
8594 
8595 	bce_dump_status_block(sc);
8596 	bce_dump_driver_state(sc);
8597 
8598 	/* Call the debugger. */
8599 	breakpoint();
8600 
8601 	return;
8602 }
8603 #endif
8604 
8605