xref: /freebsd/sys/dev/bce/if_bce.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 2006-2009 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5706S A2, A3
38  *   BCM5708C B1, B2
39  *   BCM5708S B1, B2
40  *   BCM5709C A1, C0
41  *   BCM5716C C0
42  *
43  * The following controllers are not supported by this driver:
44  *   BCM5706C A0, A1 (pre-production)
45  *   BCM5706S A0, A1 (pre-production)
46  *   BCM5708C A0, B0 (pre-production)
47  *   BCM5708S A0, B0 (pre-production)
48  *   BCM5709C A0, B0, B1, B2 (pre-production)
49  *   BCM5709S A0, A1, B0, B1, B2, C0 (pre-production)
50  */
51 
52 #include "opt_bce.h"
53 
54 #include <dev/bce/if_bcereg.h>
55 #include <dev/bce/if_bcefw.h>
56 
57 /****************************************************************************/
58 /* BCE Debug Options                                                        */
59 /****************************************************************************/
60 #ifdef BCE_DEBUG
61 	u32 bce_debug = BCE_WARN;
62 
63 	/*          0 = Never              */
64 	/*          1 = 1 in 2,147,483,648 */
65 	/*        256 = 1 in     8,388,608 */
66 	/*       2048 = 1 in     1,048,576 */
67 	/*      65536 = 1 in        32,768 */
68 	/*    1048576 = 1 in         2,048 */
69 	/*  268435456 = 1 in             8 */
70 	/*  536870912 = 1 in             4 */
71 	/* 1073741824 = 1 in             2 */
72 
73 	/* Controls how often the l2_fhdr frame error check will fail. */
74 	int l2fhdr_error_sim_control = 0;
75 
76 	/* Controls how often the unexpected attention check will fail. */
77 	int unexpected_attention_sim_control = 0;
78 
79 	/* Controls how often to simulate an mbuf allocation failure. */
80 	int mbuf_alloc_failed_sim_control = 0;
81 
82 	/* Controls how often to simulate a DMA mapping failure. */
83 	int dma_map_addr_failed_sim_control = 0;
84 
85 	/* Controls how often to simulate a bootcode failure. */
86 	int bootcode_running_failure_sim_control = 0;
87 #endif
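/*
 * The simulation controls above encode a failure probability of roughly
 * (value / 2^31); for example, 2048 / 2,147,483,648 = 1 in 1,048,576.
 * A minimal sketch of how such a control could be applied (illustrative
 * only; this is not the macro the driver actually uses):
 *
 *	if ((arc4random() & 0x7fffffffU) < (u32) l2fhdr_error_sim_control)
 *		force_error = 1;	(force_error is a hypothetical flag)
 */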
88 
89 /****************************************************************************/
90 /* BCE Build Time Options                                                   */
91 /****************************************************************************/
92 /* #define BCE_NVRAM_WRITE_SUPPORT 1 */
93 
94 
95 /****************************************************************************/
96 /* PCI Device ID Table                                                      */
97 /*                                                                          */
98 /* Used by bce_probe() to identify the devices supported by this driver.    */
99 /****************************************************************************/
100 #define BCE_DEVDESC_MAX		64
101 
102 static struct bce_type bce_devs[] = {
103 	/* BCM5706C Controllers and OEM boards. */
104 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
105 		"HP NC370T Multifunction Gigabit Server Adapter" },
106 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
107 		"HP NC370i Multifunction Gigabit Server Adapter" },
108 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
109 		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
110 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
111 		"HP NC371i Multifunction Gigabit Server Adapter" },
112 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
113 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
114 
115 	/* BCM5706S controllers and OEM boards. */
116 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
117 		"HP NC370F Multifunction Gigabit Server Adapter" },
118 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
119 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
120 
121 	/* BCM5708C controllers and OEM boards. */
122 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
123 		"HP NC373T PCIe Multifunction Gig Server Adapter" },
124 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
125 		"HP NC373i Multifunction Gigabit Server Adapter" },
126 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
127 		"HP NC374m PCIe Multifunction Adapter" },
128 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
129 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
130 
131 	/* BCM5708S controllers and OEM boards. */
132 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
133 		"HP NC373m Multifunction Gigabit Server Adapter" },
134 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
135 		"HP NC373i Multifunction Gigabit Server Adapter" },
136 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
137 		"HP NC373F PCIe Multifunc Giga Server Adapter" },
138 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
139 		"Broadcom NetXtreme II BCM5708 1000Base-SX" },
140 
141 	/* BCM5709C controllers and OEM boards. */
142 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
143 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
144 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
145 		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
146 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
147 		"Broadcom NetXtreme II BCM5709 1000Base-T" },
148 
149 	/* BCM5709S controllers and OEM boards. */
150 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
151 		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
152 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
153 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
154 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
155 		"Broadcom NetXtreme II BCM5709 1000Base-SX" },
156 
157 	/* BCM5716 controllers and OEM boards. */
158 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,  PCI_ANY_ID,  PCI_ANY_ID,
159 		"Broadcom NetXtreme II BCM5716 1000Base-T" },
160 
161 	{ 0, 0, 0, 0, NULL }
162 };
163 
164 
165 /****************************************************************************/
166 /* Supported Flash NVRAM device data.                                       */
167 /****************************************************************************/
168 static struct flash_spec flash_table[] =
169 {
170 #define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
171 #define NONBUFFERED_FLAGS	(BCE_NV_WREN)
172 
173 	/* Slow EEPROM */
174 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
175 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
176 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 	 "EEPROM - slow"},
178 	/* Expansion entry 0001 */
179 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
180 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 	 "Entry 0001"},
183 	/* Saifun SA25F010 (non-buffered flash) */
184 	/* strap, cfg1, & write1 need updates */
185 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
186 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
187 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
188 	 "Non-buffered flash (128kB)"},
189 	/* Saifun SA25F020 (non-buffered flash) */
190 	/* strap, cfg1, & write1 need updates */
191 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
192 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
194 	 "Non-buffered flash (256kB)"},
195 	/* Expansion entry 0100 */
196 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
197 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 	 "Entry 0100"},
200 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
201 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
202 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
203 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
204 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
205 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
206 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
207 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
208 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
209 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
210 	/* Saifun SA25F005 (non-buffered flash) */
211 	/* strap, cfg1, & write1 need updates */
212 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
213 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
214 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
215 	 "Non-buffered flash (64kB)"},
216 	/* Fast EEPROM */
217 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
218 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
219 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
220 	 "EEPROM - fast"},
221 	/* Expansion entry 1001 */
222 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 	 "Entry 1001"},
226 	/* Expansion entry 1010 */
227 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
228 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
229 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
230 	 "Entry 1010"},
231 	/* ATMEL AT45DB011B (buffered flash) */
232 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
233 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
235 	 "Buffered flash (128kB)"},
236 	/* Expansion entry 1100 */
237 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
238 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
239 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
240 	 "Entry 1100"},
241 	/* Expansion entry 1101 */
242 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
243 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
244 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
245 	 "Entry 1101"},
246 	/* Atmel Expansion entry 1110 */
247 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
248 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
249 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
250 	 "Entry 1110 (Atmel)"},
251 	/* ATMEL AT45DB021B (buffered flash) */
252 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
253 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
254 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
255 	 "Buffered flash (256kB)"},
256 };
257 
258 /*
259  * The BCM5709 controllers transparently handle the
260  * differences between Atmel 264 byte pages and all
261  * flash devices which use 256 byte pages, so no
262  * logical-to-physical mapping is required in the
263  * driver.
264  */
265 static struct flash_spec flash_5709 = {
266 	.flags		= BCE_NV_BUFFERED,
267 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
268 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
269 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
270 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
271 	.name		= "5709/5716 buffered flash (256kB)",
272 };
273 
274 
275 /****************************************************************************/
276 /* FreeBSD device entry points.                                             */
277 /****************************************************************************/
278 static int  bce_probe				(device_t);
279 static int  bce_attach				(device_t);
280 static int  bce_detach				(device_t);
281 static int  bce_shutdown			(device_t);
282 
283 
284 /****************************************************************************/
285 /* BCE Debug Data Structure Dump Routines                                   */
286 /****************************************************************************/
287 #ifdef BCE_DEBUG
288 static u32	bce_reg_rd				(struct bce_softc *, u32);
289 static void	bce_reg_wr				(struct bce_softc *, u32, u32);
290 static void	bce_reg_wr16			(struct bce_softc *, u32, u16);
291 static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
292 static void bce_dump_enet           (struct bce_softc *, struct mbuf *);
293 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
294 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
295 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
296 #ifdef BCE_JUMBO_HDRSPLIT
297 static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
298 #endif
299 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
300 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
301 #ifdef BCE_JUMBO_HDRSPLIT
302 static void bce_dump_pgbd			(struct bce_softc *, int, struct rx_bd *);
303 #endif
304 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
305 static void bce_dump_ctx			(struct bce_softc *, u16);
306 static void bce_dump_ftqs			(struct bce_softc *);
307 static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
308 static void bce_dump_rx_chain		(struct bce_softc *, u16, int);
309 #ifdef BCE_JUMBO_HDRSPLIT
310 static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
311 #endif
312 static void bce_dump_status_block	(struct bce_softc *);
313 static void bce_dump_stats_block	(struct bce_softc *);
314 static void bce_dump_driver_state	(struct bce_softc *);
315 static void bce_dump_hw_state		(struct bce_softc *);
316 static void bce_dump_mq_regs        (struct bce_softc *);
317 static void bce_dump_bc_state		(struct bce_softc *);
318 static void bce_dump_txp_state		(struct bce_softc *, int);
319 static void bce_dump_rxp_state		(struct bce_softc *, int);
320 static void bce_dump_tpat_state		(struct bce_softc *, int);
321 static void bce_dump_cp_state		(struct bce_softc *, int);
322 static void bce_dump_com_state		(struct bce_softc *, int);
323 static void bce_breakpoint			(struct bce_softc *);
324 #endif
325 
326 
327 /****************************************************************************/
328 /* BCE Register/Memory Access Routines                                      */
329 /****************************************************************************/
330 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
331 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
332 static void bce_shmem_wr            (struct bce_softc *, u32, u32);
333 static u32  bce_shmem_rd            (struct bce_softc *, u32);
334 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
335 static int  bce_miibus_read_reg		(device_t, int, int);
336 static int  bce_miibus_write_reg	(device_t, int, int, int);
337 static void bce_miibus_statchg		(device_t);
338 
339 
340 /****************************************************************************/
341 /* BCE NVRAM Access Routines                                                */
342 /****************************************************************************/
343 static int  bce_acquire_nvram_lock	(struct bce_softc *);
344 static int  bce_release_nvram_lock	(struct bce_softc *);
345 static void bce_enable_nvram_access	(struct bce_softc *);
346 static void	bce_disable_nvram_access(struct bce_softc *);
347 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
348 static int  bce_init_nvram			(struct bce_softc *);
349 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
350 static int  bce_nvram_test			(struct bce_softc *);
351 #ifdef BCE_NVRAM_WRITE_SUPPORT
352 static int  bce_enable_nvram_write	(struct bce_softc *);
353 static void bce_disable_nvram_write	(struct bce_softc *);
354 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
355 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
356 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
357 #endif
358 
359 /****************************************************************************/
360 /*                                                                          */
361 /****************************************************************************/
362 static void bce_get_media			(struct bce_softc *);
363 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
364 static int  bce_dma_alloc			(device_t);
365 static void bce_dma_free			(struct bce_softc *);
366 static void bce_release_resources	(struct bce_softc *);
367 
368 /****************************************************************************/
369 /* BCE Firmware Synchronization and Load                                    */
370 /****************************************************************************/
371 static int  bce_fw_sync				(struct bce_softc *, u32);
372 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
373 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
374 static void bce_init_rxp_cpu		(struct bce_softc *);
375 static void bce_init_txp_cpu 		(struct bce_softc *);
376 static void bce_init_tpat_cpu		(struct bce_softc *);
377 static void bce_init_cp_cpu		  	(struct bce_softc *);
378 static void bce_init_com_cpu	  	(struct bce_softc *);
379 static void bce_init_cpus			(struct bce_softc *);
380 
381 static void	bce_print_adapter_info	(struct bce_softc *);
382 static void bce_probe_pci_caps		(device_t, struct bce_softc *);
383 static void bce_stop				(struct bce_softc *);
384 static int  bce_reset				(struct bce_softc *, u32);
385 static int  bce_chipinit 			(struct bce_softc *);
386 static int  bce_blockinit 			(struct bce_softc *);
387 
388 static int  bce_init_tx_chain		(struct bce_softc *);
389 static void bce_free_tx_chain		(struct bce_softc *);
390 
391 static int  bce_get_rx_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
392 static int  bce_init_rx_chain		(struct bce_softc *);
393 static void bce_fill_rx_chain		(struct bce_softc *);
394 static void bce_free_rx_chain		(struct bce_softc *);
395 
396 #ifdef BCE_JUMBO_HDRSPLIT
397 static int  bce_get_pg_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *);
398 static int  bce_init_pg_chain		(struct bce_softc *);
399 static void bce_fill_pg_chain		(struct bce_softc *);
400 static void bce_free_pg_chain		(struct bce_softc *);
401 #endif
402 
403 static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
404 static void bce_start_locked		(struct ifnet *);
405 static void bce_start				(struct ifnet *);
406 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
407 static void bce_watchdog			(struct bce_softc *);
408 static int  bce_ifmedia_upd			(struct ifnet *);
409 static void bce_ifmedia_upd_locked	(struct ifnet *);
410 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
411 static void bce_init_locked			(struct bce_softc *);
412 static void bce_init				(void *);
413 static void bce_mgmt_init_locked	(struct bce_softc *sc);
414 
415 static void bce_init_ctx			(struct bce_softc *);
416 static void bce_get_mac_addr		(struct bce_softc *);
417 static void bce_set_mac_addr		(struct bce_softc *);
418 static void bce_phy_intr			(struct bce_softc *);
419 static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
420 static void bce_rx_intr				(struct bce_softc *);
421 static void bce_tx_intr				(struct bce_softc *);
422 static void bce_disable_intr		(struct bce_softc *);
423 static void bce_enable_intr			(struct bce_softc *, int);
424 
425 static void bce_intr				(void *);
426 static void bce_set_rx_mode			(struct bce_softc *);
427 static void bce_stats_update		(struct bce_softc *);
428 static void bce_tick				(void *);
429 static void bce_pulse				(void *);
430 static void bce_add_sysctls			(struct bce_softc *);
431 
432 
433 /****************************************************************************/
434 /* FreeBSD device dispatch table.                                           */
435 /****************************************************************************/
436 static device_method_t bce_methods[] = {
437 	/* Device interface (device_if.h) */
438 	DEVMETHOD(device_probe,		bce_probe),
439 	DEVMETHOD(device_attach,	bce_attach),
440 	DEVMETHOD(device_detach,	bce_detach),
441 	DEVMETHOD(device_shutdown,	bce_shutdown),
442 /* Supported by device interface but not used here. */
443 /*	DEVMETHOD(device_identify,	bce_identify),      */
444 /*	DEVMETHOD(device_suspend,	bce_suspend),       */
445 /*	DEVMETHOD(device_resume,	bce_resume),        */
446 /*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
447 
448 	/* Bus interface (bus_if.h) */
449 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
450 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
451 
452 	/* MII interface (miibus_if.h) */
453 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
454 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
455 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
456 /* Supported by MII interface but not used here.       */
457 /*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
458 /*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
459 
460 	{ 0, 0 }
461 };
462 
463 static driver_t bce_driver = {
464 	"bce",
465 	bce_methods,
466 	sizeof(struct bce_softc)
467 };
468 
469 static devclass_t bce_devclass;
470 
471 MODULE_DEPEND(bce, pci, 1, 1, 1);
472 MODULE_DEPEND(bce, ether, 1, 1, 1);
473 MODULE_DEPEND(bce, miibus, 1, 1, 1);
474 
475 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
476 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
477 
478 
479 /****************************************************************************/
480 /* Tunable device values                                                    */
481 /****************************************************************************/
482 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
483 
484 /* Allowable values are TRUE or FALSE */
485 static int bce_tso_enable = TRUE;
486 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
487 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
488 "TSO Enable/Disable");
489 
490 /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
491 /* ToDo: Add MSI-X support. */
492 static int bce_msi_enable = 1;
493 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
494 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
495 "MSI-X|MSI|INTx selector");
496 
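/*
 * Both tunables above are read-only at runtime (CTLFLAG_RDTUN) and are
 * normally set from /boot/loader.conf before the driver is loaded, for
 * example:
 *
 *	hw.bce.tso_enable=0	(disable TSO offload)
 *	hw.bce.msi_enable=1	(allow MSI, fall back to INTx)
 */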
497 /* ToDo: Add tunable to enable/disable strict MTU handling. */
498 /* Currently allows "loose" RX MTU checking (i.e. sets the  */
499 /* H/W RX MTU to the size of the largest receive buffer, or */
500 /* 2048 bytes). This will cause a UNH failure but is more   */
501 /* desirable from a functional perspective.                 */
502 
503 
504 /****************************************************************************/
505 /* Device probe function.                                                   */
506 /*                                                                          */
507 /* Compares the device to the driver's list of supported devices and        */
508 /* reports back to the OS whether this is the right driver for the device.  */
509 /*                                                                          */
510 /* Returns:                                                                 */
511 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
512 /****************************************************************************/
513 static int
514 bce_probe(device_t dev)
515 {
516 	struct bce_type *t;
517 	struct bce_softc *sc;
518 	char *descbuf;
519 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
520 
521 	t = bce_devs;
522 
523 	sc = device_get_softc(dev);
524 	bzero(sc, sizeof(struct bce_softc));
525 	sc->bce_unit = device_get_unit(dev);
526 	sc->bce_dev = dev;
527 
528 	/* Get the data for the device to be probed. */
529 	vid  = pci_get_vendor(dev);
530 	did  = pci_get_device(dev);
531 	svid = pci_get_subvendor(dev);
532 	sdid = pci_get_subdevice(dev);
533 
534 	DBPRINT(sc, BCE_EXTREME_LOAD,
535 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
536 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
537 
538 	/* Look through the list of known devices for a match. */
539 	while(t->bce_name != NULL) {
540 
541 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
542 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
543 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
544 
545 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
546 
547 			if (descbuf == NULL)
548 				return(ENOMEM);
549 
550 			/* Print out the device identity. */
551 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
552 				t->bce_name,
553 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
554 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
555 
556 			device_set_desc_copy(dev, descbuf);
557 			free(descbuf, M_TEMP);
558 			return(BUS_PROBE_DEFAULT);
559 		}
560 		t++;
561 	}
562 
563 	return(ENXIO);
564 }
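/*
 * The description string built above decodes the chip revision from
 * PCIR_REVID: the high nibble selects the revision letter ('A' + nibble)
 * and the low nibble is the metal revision.  For example (hypothetical
 * value), a revision ID of 0x20 would be reported as "... (C0)".
 */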
565 
566 
567 /****************************************************************************/
568 /* Adapter Information Print Function.                                     */
569 /*                                                                          */
570 /* Prints a summary of the adapter's ASIC revision, bus type and speed,    */
571 /* bootcode version, and enabled features.                                 */
572 /*                                                                          */
573 /* Returns:                                                                 */
574 /*   None.                                                                  */
575 /****************************************************************************/
576 static void
577 bce_print_adapter_info(struct bce_softc *sc)
578 {
579     int i = 0;
580 
581 	DBENTER(BCE_VERBOSE_LOAD);
582 
583 	BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
584 	printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
585 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
586 
587 	/* Bus info. */
588 	if (sc->bce_flags & BCE_PCIE_FLAG) {
589 		printf("Bus (PCIe x%d, ", sc->link_width);
590 		switch (sc->link_speed) {
591 			case 1: printf("2.5Gbps); "); break;
592 			case 2:	printf("5Gbps); "); break;
593 			default: printf("Unknown link speed); ");
594 		}
595 	} else {
596 		printf("Bus (PCI%s, %s, %dMHz); ",
597 			((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
598 			((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
599 			sc->bus_speed_mhz);
600 	}
601 
602 	/* Firmware version and device features. */
603 	printf("B/C (%s); Flags (", sc->bce_bc_ver);
604 
605 #ifdef BCE_JUMBO_HDRSPLIT
606 	printf("SPLT");
607     i++;
608 #endif
609     if (sc->bce_flags & BCE_USING_MSI_FLAG) {
610         if (i > 0) printf("|");
611 		printf("MSI"); i++;
612     }
613 
614     if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
615         if (i > 0) printf("|");
616 		printf("MSI-X"); i++;
617     }
618 
619     if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
620         if (i > 0) printf("|");
621 		printf("2.5G"); i++;
622     }
623 
624     if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
625         if (i > 0) printf("|");
626         printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
627     } else {
628         printf(")\n");
629     }
630 
631 	DBEXIT(BCE_VERBOSE_LOAD);
632 }
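/*
 * Example of the one-line summary produced above (all values purely
 * illustrative):
 *
 *	ASIC (0x57092003); Rev (C0); Bus (PCIe x4, 2.5Gbps); B/C (4.6.4);
 *	Flags (MSI)
 */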
633 
634 
635 /****************************************************************************/
636 /* PCI Capabilities Probe Function.                                         */
637 /*                                                                          */
638 /* Walks the PCI capabilities list for the device to find what features are */
639 /* supported.                                                               */
640 /*                                                                          */
641 /* Returns:                                                                 */
642 /*   None.                                                                  */
643 /****************************************************************************/
644 static void
645 bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
646 {
647 	u32 reg;
648 
649 	DBENTER(BCE_VERBOSE_LOAD);
650 
651 	/* Check if PCI-X capability is enabled. */
652 	if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
653 		if (reg != 0)
654 			sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
655 	}
656 
657 	/* Check if PCIe capability is enabled. */
658 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
659 		if (reg != 0) {
660 			u16 link_status = pci_read_config(dev, reg + 0x12, 2);
661 			DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = 0x%08X\n",
662 				link_status);
663 			sc->link_speed = link_status & 0xf;
664 			sc->link_width = (link_status >> 4) & 0x3f;
665 			sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
666 			sc->bce_flags |= BCE_PCIE_FLAG;
667 		}
668 	}
669 
670 	/* Check if MSI capability is enabled. */
671 	if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
672 		if (reg != 0)
673 			sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
674 	}
675 
676 	/* Check if MSI-X capability is enabled. */
677 	if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
678 		if (reg != 0)
679 			sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
680 	}
681 
682 	DBEXIT(BCE_VERBOSE_LOAD);
683 }
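/*
 * The PCIe Link Status register lives at offset 0x12 within the express
 * capability; bits 3:0 hold the negotiated link speed and bits 9:4 the
 * link width, which is what the shifts and masks above extract.  For
 * example (hypothetical value), link_status = 0x1041 decodes to speed 1
 * (2.5Gbps) and width x4.
 */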
684 
685 
686 /****************************************************************************/
687 /* Device attach function.                                                  */
688 /*                                                                          */
689 /* Allocates device resources, performs secondary chip identification,      */
690 /* resets and initializes the hardware, and initializes driver instance     */
691 /* variables.                                                               */
692 /*                                                                          */
693 /* Returns:                                                                 */
694 /*   0 on success, positive value on failure.                               */
695 /****************************************************************************/
696 static int
697 bce_attach(device_t dev)
698 {
699 	struct bce_softc *sc;
700 	struct ifnet *ifp;
701 	u32 val;
702 	int error, rid, rc = 0;
703 
704 	sc = device_get_softc(dev);
705 	sc->bce_dev = dev;
706 
707 	DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
708 
709 	sc->bce_unit = device_get_unit(dev);
710 
711 	/* Set initial device and PHY flags */
712 	sc->bce_flags = 0;
713 	sc->bce_phy_flags = 0;
714 
715 	pci_enable_busmaster(dev);
716 
717 	/* Allocate PCI memory resources. */
718 	rid = PCIR_BAR(0);
719 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
720 		&rid, RF_ACTIVE);
721 
722 	if (sc->bce_res_mem == NULL) {
723 		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
724 			__FILE__, __LINE__);
725 		rc = ENXIO;
726 		goto bce_attach_fail;
727 	}
728 
729 	/* Get various resource handles. */
730 	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
731 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
732 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
733 
734 	bce_probe_pci_caps(dev, sc);
735 
736 	rid = 1;
737 #if 0
738 	/* Try allocating MSI-X interrupts. */
739 	if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
740 		(bce_msi_enable >= 2) &&
741 		((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
742 		&rid, RF_ACTIVE)) != NULL)) {
743 
744 		msi_needed = sc->bce_msi_count = 1;
745 
746 		if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
747 			(sc->bce_msi_count != msi_needed)) {
748 			BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
749 				"Received = %d, error = %d\n", __FILE__, __LINE__,
750 				msi_needed, sc->bce_msi_count, error);
751 			sc->bce_msi_count = 0;
752 			pci_release_msi(dev);
753 			bus_release_resource(dev, SYS_RES_MEMORY, rid,
754 				sc->bce_res_irq);
755 			sc->bce_res_irq = NULL;
756 		} else {
757 			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
758 				__FUNCTION__);
759 			sc->bce_flags |= BCE_USING_MSIX_FLAG;
760 			sc->bce_intr = bce_intr;
761 		}
762 	}
763 #endif
764 
765 	/* Try allocating a MSI interrupt. */
766 	if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
767 		(bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
768 		sc->bce_msi_count = 1;
769 		if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
770 			BCE_PRINTF("%s(%d): MSI allocation failed! error = %d\n",
771 				__FILE__, __LINE__, error);
772 			sc->bce_msi_count = 0;
773 			pci_release_msi(dev);
774 		} else {
775 			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI interrupt.\n",
776 				__FUNCTION__);
777 			sc->bce_flags |= BCE_USING_MSI_FLAG;
778 			if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
779 				(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
780 				sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
781 			sc->bce_irq_rid = 1;
782 			sc->bce_intr = bce_intr;
783 		}
784 	}
785 
786 	/* Try allocating a legacy interrupt. */
787 	if (sc->bce_msi_count == 0) {
788 		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
789 			__FUNCTION__);
790 		rid = 0;
791 		sc->bce_intr = bce_intr;
792 	}
793 
794 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
795 		&rid, RF_SHAREABLE | RF_ACTIVE);
796 
797 	sc->bce_irq_rid = rid;
798 
799 	/* Report any IRQ allocation errors. */
800 	if (sc->bce_res_irq == NULL) {
801 		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
802 			__FILE__, __LINE__);
803 		rc = ENXIO;
804 		goto bce_attach_fail;
805 	}
806 
807 	/* Initialize mutex for the current device instance. */
808 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
809 
810 	/*
811 	 * Configure byte swap and enable indirect register access.
812 	 * Rely on CPU to do target byte swapping on big endian systems.
813 	 * Access to registers outside of PCI configuration space is not
814 	 * valid until this is done.
815 	 */
816 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
817 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
818 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
819 
820 	/* Save ASIC revision info. */
821 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
822 
823 	/* Weed out any non-production controller revisions. */
824 	switch(BCE_CHIP_ID(sc)) {
825 		case BCE_CHIP_ID_5706_A0:
826 		case BCE_CHIP_ID_5706_A1:
827 		case BCE_CHIP_ID_5708_A0:
828 		case BCE_CHIP_ID_5708_B0:
829 		case BCE_CHIP_ID_5709_A0:
830 		case BCE_CHIP_ID_5709_B0:
831 		case BCE_CHIP_ID_5709_B1:
832 		case BCE_CHIP_ID_5709_B2:
833 			BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
834 				__FILE__, __LINE__,
835 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
836 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
837 			rc = ENODEV;
838 			goto bce_attach_fail;
839 	}
840 
841 	/*
842 	 * The embedded PCIe to PCI-X bridge (EPB)
843 	 * in the 5708 cannot address memory above
844 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
845 	 */
846 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
847 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
848 	else
849 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
850 
851 	/*
852 	 * Find the base address for shared memory access.
853 	 * Newer versions of bootcode use a signature and offset
854 	 * while older versions use a fixed address.
855 	 */
856 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
857 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
858 		/* Multi-port devices use different offsets in shared memory. */
859 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
860 			(pci_get_function(sc->bce_dev) << 2));
861 	else
862 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
863 
864 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
865 		__FUNCTION__, sc->bce_shmem_base);
866 
867 	/* Fetch the bootcode revision. */
868     val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
869     for (int i = 0, j = 0; i < 3; i++) {
870         u8 num;
871 
872         num = (u8) (val >> (24 - (i * 8)));
873         for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
874             if (num >= k || !skip0 || k == 1) {
875                 sc->bce_bc_ver[j++] = (num / k) + '0';
876                 skip0 = 0;
877             }
878         }
879         if (i != 2)
880             sc->bce_bc_ver[j++] = '.';
881     }
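    /*
     * The loop above converts the three high-order bytes of
     * BCE_DEV_INFO_BC_REV into a dotted-decimal string, dropping leading
     * zeros within each byte.  For example (hypothetical value),
     * val = 0x040C0200 would yield the bootcode version "4.12.2".
     */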
882 
883     /* Check if any management firmware is running. */
884     val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
885     if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
886         sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
887 
888         /* Allow time for firmware to enter the running state. */
889         for (int i = 0; i < 30; i++) {
890             val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
891             if (val & BCE_CONDITION_MFW_RUN_MASK)
892                 break;
893             DELAY(10000);
894         }
895     }
896 
897     /* Check the current bootcode state. */
898     val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
899     val &= BCE_CONDITION_MFW_RUN_MASK;
900     if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
901         val != BCE_CONDITION_MFW_RUN_NONE) {
902         u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
903         int i = 0;
904 
905         for (int j = 0; j < 3; j++) {
906             val = bce_reg_rd_ind(sc, addr + j * 4);
907             val = bswap32(val);
908             memcpy(&sc->bce_mfw_ver[i], &val, 4);
909             i += 4;
910         }
911     }
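    /*
     * The three dwords read above are byte-swapped and concatenated into
     * bce_mfw_ver, presumably recovering a plain ASCII version string
     * stored at the location pointed to by BCE_MFW_VER_PTR; it is later
     * reported by bce_print_adapter_info() as "MFW (...)".
     */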
912 
913 	/* Get PCI bus information (speed and type). */
914 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
915 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
916 		u32 clkreg;
917 
918 		sc->bce_flags |= BCE_PCIX_FLAG;
919 
920 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
921 
922 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
923 		switch (clkreg) {
924 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
925 			sc->bus_speed_mhz = 133;
926 			break;
927 
928 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
929 			sc->bus_speed_mhz = 100;
930 			break;
931 
932 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
933 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
934 			sc->bus_speed_mhz = 66;
935 			break;
936 
937 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
938 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
939 			sc->bus_speed_mhz = 50;
940 			break;
941 
942 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
943 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
944 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
945 			sc->bus_speed_mhz = 33;
946 			break;
947 		}
948 	} else {
949 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
950 			sc->bus_speed_mhz = 66;
951 		else
952 			sc->bus_speed_mhz = 33;
953 	}
954 
955 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
956 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
957 
958 	/* Reset the controller and announce to bootcode that driver is present. */
959 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
960 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
961 			__FILE__, __LINE__);
962 		rc = ENXIO;
963 		goto bce_attach_fail;
964 	}
965 
966 	/* Initialize the controller. */
967 	if (bce_chipinit(sc)) {
968 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
969 			__FILE__, __LINE__);
970 		rc = ENXIO;
971 		goto bce_attach_fail;
972 	}
973 
974 	/* Perform NVRAM test. */
975 	if (bce_nvram_test(sc)) {
976 		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
977 			__FILE__, __LINE__);
978 		rc = ENXIO;
979 		goto bce_attach_fail;
980 	}
981 
982 	/* Fetch the permanent Ethernet MAC address. */
983 	bce_get_mac_addr(sc);
984 
985 	/*
986 	 * Trip points control how many BDs
987 	 * should be ready before generating an
988 	 * interrupt while ticks control how long
989 	 * a BD can sit in the chain before
990 	 * generating an interrupt.  Set the default
991 	 * values for the RX and TX chains.
992 	 */
993 
994 #ifdef BCE_DEBUG
995 	/* Force more frequent interrupts. */
996 	sc->bce_tx_quick_cons_trip_int = 1;
997 	sc->bce_tx_quick_cons_trip     = 1;
998 	sc->bce_tx_ticks_int           = 0;
999 	sc->bce_tx_ticks               = 0;
1000 
1001 	sc->bce_rx_quick_cons_trip_int = 1;
1002 	sc->bce_rx_quick_cons_trip     = 1;
1003 	sc->bce_rx_ticks_int           = 0;
1004 	sc->bce_rx_ticks               = 0;
1005 #else
1006 	/* Improve throughput at the expense of increased latency. */
1007 	sc->bce_tx_quick_cons_trip_int = 20;
1008 	sc->bce_tx_quick_cons_trip     = 20;
1009 	sc->bce_tx_ticks_int           = 80;
1010 	sc->bce_tx_ticks               = 80;
1011 
1012 	sc->bce_rx_quick_cons_trip_int = 6;
1013 	sc->bce_rx_quick_cons_trip     = 6;
1014 	sc->bce_rx_ticks_int           = 18;
1015 	sc->bce_rx_ticks               = 18;
1016 #endif
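	/*
	 * With the non-debug defaults above, for example, an RX interrupt is
	 * generated once 6 receive BDs have completed or after 18 coalescing
	 * tick intervals, whichever comes first; the TX side uses 20 BDs and
	 * 80 ticks.
	 */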
1017 
1018 	/* Update statistics once every second. */
1019 	sc->bce_stats_ticks = 1000000 & 0xffff00;
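	/*
	 * 1,000,000 & 0xffff00 = 999,936: the mask clears the low-order byte,
	 * which the statistics ticks field apparently requires to be zero,
	 * giving a statistics refresh interval of roughly one second.
	 */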
1020 
1021 	/* Find the media type for the adapter. */
1022 	bce_get_media(sc);
1023 
1024 	/* Store data needed by PHY driver for backplane applications */
1025 	sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1026 	sc->bce_port_hw_cfg   = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);
1027 
1028 	/* Allocate DMA memory resources. */
1029 	if (bce_dma_alloc(dev)) {
1030 		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
1031 		    __FILE__, __LINE__);
1032 		rc = ENXIO;
1033 		goto bce_attach_fail;
1034 	}
1035 
1036 	/* Allocate an ifnet structure. */
1037 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
1038 	if (ifp == NULL) {
1039 		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
1040 			__FILE__, __LINE__);
1041 		rc = ENXIO;
1042 		goto bce_attach_fail;
1043 	}
1044 
1045 	/* Initialize the ifnet interface. */
1046 	ifp->if_softc        = sc;
1047 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1048 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1049 	ifp->if_ioctl        = bce_ioctl;
1050 	ifp->if_start        = bce_start;
1051 	ifp->if_init         = bce_init;
1052 	ifp->if_mtu          = ETHERMTU;
1053 
1054 	if (bce_tso_enable) {
1055 		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
1056 		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
1057 	} else {
1058 		ifp->if_hwassist = BCE_IF_HWASSIST;
1059 		ifp->if_capabilities = BCE_IF_CAPABILITIES;
1060 	}
1061 
1062 	ifp->if_capenable    = ifp->if_capabilities;
1063 
1064 	/*
1065 	 * Assume standard mbuf sizes for buffer allocation.
1066 	 * This may change later if the MTU size is set to
1067 	 * something other than 1500.
1068 	 */
1069 #ifdef BCE_JUMBO_HDRSPLIT
1070 	sc->rx_bd_mbuf_alloc_size = MHLEN;
1071 	/* Make sure offset is 16 byte aligned for hardware. */
1072 	sc->rx_bd_mbuf_align_pad  = roundup2((MSIZE - MHLEN), 16) -
1073 		(MSIZE - MHLEN);
1074 	sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
1075 		sc->rx_bd_mbuf_align_pad;
1076 	sc->pg_bd_mbuf_alloc_size = MCLBYTES;
1077 #else
1078 	sc->rx_bd_mbuf_alloc_size = MCLBYTES;
1079 	sc->rx_bd_mbuf_align_pad  = roundup2(MCLBYTES, 16) - MCLBYTES;
1080 	sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
1081 		sc->rx_bd_mbuf_align_pad;
1082 #endif
1083 
1084 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
1085 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1086 	IFQ_SET_READY(&ifp->if_snd);
1087 
1088 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1089 		ifp->if_baudrate = IF_Mbps(2500ULL);
1090 	else
1091 		ifp->if_baudrate = IF_Mbps(1000);
1092 
1093 	/* Check for an MII child bus by probing the PHY. */
1094 	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
1095 		bce_ifmedia_sts)) {
1096 		BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
1097 			__FILE__, __LINE__);
1098 		rc = ENXIO;
1099 		goto bce_attach_fail;
1100 	}
1101 
1102 	/* Attach to the Ethernet interface list. */
1103 	ether_ifattach(ifp, sc->eaddr);
1104 
1105 #if __FreeBSD_version < 500000
1106 	callout_init(&sc->bce_tick_callout);
1107 	callout_init(&sc->bce_pulse_callout);
1108 #else
1109 	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1110 	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1111 #endif
1112 
1113 	/* Hookup IRQ last. */
1114 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1115 		NULL, bce_intr, sc, &sc->bce_intrhand);
1116 
1117 	if (rc) {
1118 		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1119 			__FILE__, __LINE__);
1120 		bce_detach(dev);
1121 		goto bce_attach_exit;
1122 	}
1123 
1124 	/*
1125 	 * At this point we've acquired all the resources
1126 	 * we need to run so there's no turning back, we're
1127 	 * cleared for launch.
1128 	 */
1129 
1130 	/* Print some important debugging info. */
1131 	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1132 
1133 	/* Add the supported sysctls to the kernel. */
1134 	bce_add_sysctls(sc);
1135 
1136 	BCE_LOCK(sc);
1137 
1138 	/*
1139 	 * The chip reset earlier notified the bootcode that
1140 	 * a driver is present.  We now need to start our pulse
1141 	 * routine so that the bootcode is reminded that we're
1142 	 * still running.
1143 	 */
1144 	bce_pulse(sc);
1145 
1146 	bce_mgmt_init_locked(sc);
1147 	BCE_UNLOCK(sc);
1148 
1149 	/* Finally, print some useful adapter info */
1150 	bce_print_adapter_info(sc);
1151 	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1152 		__FUNCTION__, sc);
1153 
1154 	goto bce_attach_exit;
1155 
1156 bce_attach_fail:
1157 	bce_release_resources(sc);
1158 
1159 bce_attach_exit:
1160 
1161 	DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1162 
1163 	return(rc);
1164 }
1165 
1166 
1167 /****************************************************************************/
1168 /* Device detach function.                                                  */
1169 /*                                                                          */
1170 /* Stops the controller, resets the controller, and releases resources.     */
1171 /*                                                                          */
1172 /* Returns:                                                                 */
1173 /*   0 on success, positive value on failure.                               */
1174 /****************************************************************************/
1175 static int
1176 bce_detach(device_t dev)
1177 {
1178 	struct bce_softc *sc = device_get_softc(dev);
1179 	struct ifnet *ifp;
1180 	u32 msg;
1181 
1182 	DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1183 
1184 	ifp = sc->bce_ifp;
1185 
1186 	/* Stop and reset the controller. */
1187 	BCE_LOCK(sc);
1188 
1189 	/* Stop the pulse so the bootcode can go to driver absent state. */
1190 	callout_stop(&sc->bce_pulse_callout);
1191 
1192 	bce_stop(sc);
1193 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1194 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1195 	else
1196 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1197 	bce_reset(sc, msg);
1198 
1199 	BCE_UNLOCK(sc);
1200 
1201 	ether_ifdetach(ifp);
1202 
1203 	/* If we have a child device on the MII bus remove it too. */
1204 	bus_generic_detach(dev);
1205 	device_delete_child(dev, sc->bce_miibus);
1206 
1207 	/* Release all remaining resources. */
1208 	bce_release_resources(sc);
1209 
1210 	DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1211 
1212 	return(0);
1213 }
1214 
1215 
1216 /****************************************************************************/
1217 /* Device shutdown function.                                                */
1218 /*                                                                          */
1219 /* Stops and resets the controller.                                         */
1220 /*                                                                          */
1221 /* Returns:                                                                 */
1222 /*   0 on success, positive value on failure.                               */
1223 /****************************************************************************/
1224 static int
1225 bce_shutdown(device_t dev)
1226 {
1227 	struct bce_softc *sc = device_get_softc(dev);
1228 	u32 msg;
1229 
1230 	DBENTER(BCE_VERBOSE);
1231 
1232 	BCE_LOCK(sc);
1233 	bce_stop(sc);
1234 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1235 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1236 	else
1237 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1238 	bce_reset(sc, msg);
1239 	BCE_UNLOCK(sc);
1240 
1241 	DBEXIT(BCE_VERBOSE);
1242 
1243 	return (0);
1244 }
1245 
1246 
1247 #ifdef BCE_DEBUG
1248 /****************************************************************************/
1249 /* Register read.                                                           */
1250 /*                                                                          */
1251 /* Returns:                                                                 */
1252 /*   The value of the register.                                             */
1253 /****************************************************************************/
1254 static u32
1255 bce_reg_rd(struct bce_softc *sc, u32 offset)
1256 {
1257 	u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
1258 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1259 		__FUNCTION__, offset, val);
1260 	return val;
1261 }
1262 
1263 
1264 /****************************************************************************/
1265 /* Register write (16 bit).                                                 */
1266 /*                                                                          */
1267 /* Returns:                                                                 */
1268 /*   Nothing.                                                               */
1269 /****************************************************************************/
1270 static void
1271 bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1272 {
1273 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1274 		__FUNCTION__, offset, val);
1275 	bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
1276 }
1277 
1278 
1279 /****************************************************************************/
1280 /* Register write.                                                          */
1281 /*                                                                          */
1282 /* Returns:                                                                 */
1283 /*   Nothing.                                                               */
1284 /****************************************************************************/
1285 static void
1286 bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1287 {
1288 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1289 		__FUNCTION__, offset, val);
1290 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
1291 }
1292 #endif
1293 
1294 /****************************************************************************/
1295 /* Indirect register read.                                                  */
1296 /*                                                                          */
1297 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1298 /* configuration space.  Using this mechanism avoids issues with posted     */
1299 /* reads but is much slower than memory-mapped I/O.                         */
1300 /*                                                                          */
1301 /* Returns:                                                                 */
1302 /*   The value of the register.                                             */
1303 /****************************************************************************/
1304 static u32
1305 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1306 {
1307 	device_t dev;
1308 	dev = sc->bce_dev;
1309 
1310 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1311 #ifdef BCE_DEBUG
1312 	{
1313 		u32 val;
1314 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1315 		DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1316 			__FUNCTION__, offset, val);
1317 		return val;
1318 	}
1319 #else
1320 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1321 #endif
1322 }
1323 
1324 
1325 /****************************************************************************/
1326 /* Indirect register write.                                                 */
1327 /*                                                                          */
1328 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1329 /* configuration space.  Using this mechanism avoids issues with posted     */
1330 /* writes but is much slower than memory-mapped I/O.                        */
1331 /*                                                                          */
1332 /* Returns:                                                                 */
1333 /*   Nothing.                                                               */
1334 /****************************************************************************/
1335 static void
1336 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1337 {
1338 	device_t dev;
1339 	dev = sc->bce_dev;
1340 
1341 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1342 		__FUNCTION__, offset, val);
1343 
1344 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1345 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1346 }
1347 
1348 
1349 /****************************************************************************/
1350 /* Shared memory write.                                                     */
1351 /*                                                                          */
1352 /* Writes NetXtreme II shared memory region.                                */
1353 /*                                                                          */
1354 /* Returns:                                                                 */
1355 /*   Nothing.                                                               */
1356 /****************************************************************************/
1357 static void
1358 bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
1359 {
1360 	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1361 }
1362 
1363 
1364 /****************************************************************************/
1365 /* Shared memory read.                                                      */
1366 /*                                                                          */
1367 /* Reads NetXtreme II shared memory region.                                 */
1368 /*                                                                          */
1369 /* Returns:                                                                 */
1370 /*   The 32 bit value read.                                                 */
1371 /****************************************************************************/
1372 static u32
1373 bce_shmem_rd(struct bce_softc *sc, u32 offset)
1374 {
1375 	return (bce_reg_rd_ind(sc, sc->bce_shmem_base + offset));
1376 }
1377 
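/*
 * Illustrative sketch (not built): bce_shmem_rd()/bce_shmem_wr() take
 * offsets relative to sc->bce_shmem_base, so driver/firmware mailbox
 * words can be accessed without knowing the absolute address.  The
 * mailbox offset below is a hypothetical placeholder, not a real
 * firmware interface definition.
 */
#if 0
#define	BCE_EXAMPLE_SHMEM_MB	0x0000	/* hypothetical mailbox offset */

static u32
bce_shmem_mailbox_example(struct bce_softc *sc, u32 msg)
{
	/* Post a message word for the bootcode to consume... */
	bce_shmem_wr(sc, BCE_EXAMPLE_SHMEM_MB, msg);

	/* ...then read back whatever the firmware left in the same word. */
	return (bce_shmem_rd(sc, BCE_EXAMPLE_SHMEM_MB));
}
#endif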
1378 
1379 #ifdef BCE_DEBUG
1380 /****************************************************************************/
1381 /* Context memory read.                                                     */
1382 /*                                                                          */
1383 /* The NetXtreme II controller uses context memory to track connection      */
1384 /* information for L2 and higher network protocols.                         */
1385 /*                                                                          */
1386 /* Returns:                                                                 */
1387 /*   The requested 32 bit value of context memory.                          */
1388 /****************************************************************************/
1389 static u32
1390 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1391 {
1392 	u32 idx, offset, retry_cnt = 5, val;
1393 
1394 	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1395 		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1396 			__FUNCTION__, cid_addr));
1397 
1398 	offset = ctx_offset + cid_addr;
1399 
1400 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1401 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1402 
1403 		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1404 
1405 		for (idx = 0; idx < retry_cnt; idx++) {
1406 			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1407 			if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1408 				break;
1409 			DELAY(5);
1410 		}
1411 
1412 		if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1413 			BCE_PRINTF("%s(%d): Unable to read CTX memory: "
1414 				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1415 				__FILE__, __LINE__, cid_addr, ctx_offset);
1416 
1417 		val = REG_RD(sc, BCE_CTX_CTX_DATA);
1418 	} else {
1419 		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1420 		val = REG_RD(sc, BCE_CTX_DATA);
1421 	}
1422 
1423 	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1424 		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1425 
1426 	return(val);
1427 }
1428 #endif
1429 
1430 
1431 /****************************************************************************/
1432 /* Context memory write.                                                    */
1433 /*                                                                          */
1434 /* The NetXtreme II controller uses context memory to track connection      */
1435 /* information for L2 and higher network protocols.                         */
1436 /*                                                                          */
1437 /* Returns:                                                                 */
1438 /*   Nothing.                                                               */
1439 /****************************************************************************/
1440 static void
1441 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1442 {
1443 	u32 idx, offset = ctx_offset + cid_addr;
1444 	u32 val, retry_cnt = 5;
1445 
1446 	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1447 		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1448 
1449 	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1450 		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1451 			__FUNCTION__, cid_addr));
1452 
1453 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1454 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1455 
1456 		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1457 		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1458 
1459 		for (idx = 0; idx < retry_cnt; idx++) {
1460 			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1461 			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1462 				break;
1463 			DELAY(5);
1464 		}
1465 
1466 		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1467 			BCE_PRINTF("%s(%d): Unable to write CTX memory: "
1468 				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1469 				__FILE__, __LINE__, cid_addr, ctx_offset);
1470 
1471 	} else {
1472 		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1473 		REG_WR(sc, BCE_CTX_DATA, ctx_val);
1474 	}
1475 }
1476 
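/*
 * Illustrative sketch (not built): context memory is addressed as a CID
 * base address plus a 4 byte aligned field offset, and bce_ctx_wr()
 * above hides the 5709/5716 write-request handshake.  The field offsets
 * used here are placeholders, not real context layout definitions.
 */
#if 0
static void
bce_ctx_clear_example(struct bce_softc *sc, u32 cid_addr)
{
	u32 offset;

	/* Zero the first 16 bytes of one (hypothetical) context entry. */
	for (offset = 0; offset < 16; offset += 4)
		bce_ctx_wr(sc, cid_addr, offset, 0);
}
#endif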
1477 
1478 /****************************************************************************/
1479 /* PHY register read.                                                       */
1480 /*                                                                          */
1481 /* Implements register reads on the MII bus.                                */
1482 /*                                                                          */
1483 /* Returns:                                                                 */
1484 /*   The value of the register.                                             */
1485 /****************************************************************************/
1486 static int
1487 bce_miibus_read_reg(device_t dev, int phy, int reg)
1488 {
1489 	struct bce_softc *sc;
1490 	u32 val;
1491 	int i;
1492 
1493 	sc = device_get_softc(dev);
1494 
1495 	/* Make sure we are accessing the correct PHY address. */
1496 	if (phy != sc->bce_phy_addr) {
1497 		DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1498 		return(0);
1499 	}
1500 
1501 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1502 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1503 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1504 
1505 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1506 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1507 
1508 		DELAY(40);
1509 	}
1510 
1511 
1512 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1513 		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1514 		BCE_EMAC_MDIO_COMM_START_BUSY;
1515 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1516 
1517 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1518 		DELAY(10);
1519 
1520 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1521 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1522 			DELAY(5);
1523 
1524 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1525 			val &= BCE_EMAC_MDIO_COMM_DATA;
1526 
1527 			break;
1528 		}
1529 	}
1530 
1531 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1532 		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1533 			__FILE__, __LINE__, phy, reg);
1534 		val = 0x0;
1535 	} else {
1536 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1537 	}
1538 
1539 
1540 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1541 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1542 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1543 
1544 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1545 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1546 
1547 		DELAY(40);
1548 	}
1549 
1550 	DB_PRINT_PHY_REG(reg, val);
1551 	return (val & 0xffff);
1552 
1553 }
1554 
1555 
1556 /****************************************************************************/
1557 /* PHY register write.                                                      */
1558 /*                                                                          */
1559 /* Implements register writes on the MII bus.                               */
1560 /*                                                                          */
1561 /* Returns:                                                                 */
1562 /*   0 for success.                                                         */
1563 /****************************************************************************/
1564 static int
1565 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1566 {
1567 	struct bce_softc *sc;
1568 	u32 val1;
1569 	int i;
1570 
1571 	sc = device_get_softc(dev);
1572 
1573 	/* Make sure we are accessing the correct PHY address. */
1574 	if (phy != sc->bce_phy_addr) {
1575 		DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1576 		return(0);
1577 	}
1578 
1579 	DB_PRINT_PHY_REG(reg, val);
1580 
1581 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1582 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1583 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1584 
1585 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1586 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1587 
1588 		DELAY(40);
1589 	}
1590 
1591 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1592 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1593 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1594 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1595 
1596 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1597 		DELAY(10);
1598 
1599 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1600 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1601 			DELAY(5);
1602 			break;
1603 		}
1604 	}
1605 
1606 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1607 		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1608 			__FILE__, __LINE__);
1609 
1610 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1611 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1612 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1613 
1614 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1615 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1616 
1617 		DELAY(40);
1618 	}
1619 
1620 	return 0;
1621 }
1622 
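/*
 * Illustrative sketch (not built): the two miibus methods above are
 * normally invoked by the MII layer rather than called directly, but a
 * read-modify-write of a standard PHY register through them looks like
 * this.  MII_BMCR and BMCR_RESET come from <dev/mii/mii.h>; the direct
 * calls are for illustration only.
 */
#if 0
static void
bce_phy_reset_example(struct bce_softc *sc)
{
	int bmcr;

	/* Read the basic mode control register from the attached PHY. */
	bmcr = bce_miibus_read_reg(sc->bce_dev, sc->bce_phy_addr, MII_BMCR);

	/* Set the self-clearing reset bit and write the register back. */
	bmcr |= BMCR_RESET;
	bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, MII_BMCR, bmcr);
}
#endif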
1623 
1624 /****************************************************************************/
1625 /* MII bus status change.                                                   */
1626 /*                                                                          */
1627 /* Called by the MII bus driver when the PHY establishes link to set the    */
1628 /* MAC interface registers.                                                 */
1629 /*                                                                          */
1630 /* Returns:                                                                 */
1631 /*   Nothing.                                                               */
1632 /****************************************************************************/
1633 static void
1634 bce_miibus_statchg(device_t dev)
1635 {
1636 	struct bce_softc *sc;
1637 	struct mii_data *mii;
1638 	int val;
1639 
1640 	sc = device_get_softc(dev);
1641 
1642 	DBENTER(BCE_VERBOSE_PHY);
1643 
1644 	mii = device_get_softc(sc->bce_miibus);
1645 
1646 	val = REG_RD(sc, BCE_EMAC_MODE);
1647 	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1648 		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1649 		BCE_EMAC_MODE_25G);
1650 
1651 	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
1652 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1653 	case IFM_10_T:
1654 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1655 			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1656 			val |= BCE_EMAC_MODE_PORT_MII_10;
1657 			break;
1658 		}
1659 		/* fall-through */
1660 	case IFM_100_TX:
1661 		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1662 		val |= BCE_EMAC_MODE_PORT_MII;
1663 		break;
1664 	case IFM_2500_SX:
1665 		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1666 		val |= BCE_EMAC_MODE_25G;
1667 		/* fall-through */
1668 	case IFM_1000_T:
1669 	case IFM_1000_SX:
1670 		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1671 		val |= BCE_EMAC_MODE_PORT_GMII;
1672 		break;
1673 	default:
1674 		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1675 			"interface.\n");
1676 		val |= BCE_EMAC_MODE_PORT_GMII;
1677 	}
1678 
1679 	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1680 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1681 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1682 		val |= BCE_EMAC_MODE_HALF_DUPLEX;
1683 	} else
1684 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1685 
1686 	REG_WR(sc, BCE_EMAC_MODE, val);
1687 
1688 #if 0
1689 	/* ToDo: Enable flow control support in brgphy and bge. */
1690 	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1691 	if (mii->mii_media_active & IFM_FLAG0)
1692 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1693 	if (mii->mii_media_active & IFM_FLAG1)
1694 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1695 #endif
1696 
1697 	DBEXIT(BCE_VERBOSE_PHY);
1698 }
1699 
1700 
1701 /****************************************************************************/
1702 /* Acquire NVRAM lock.                                                      */
1703 /*                                                                          */
1704 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1705 /* Lock 1 is used by the firmware and lock 2 is reserved for use by the     */
1706 /* driver.                                                                   */
1707 /*                                                                          */
1708 /* Returns:                                                                 */
1709 /*   0 on success, positive value on failure.                               */
1710 /****************************************************************************/
1711 static int
1712 bce_acquire_nvram_lock(struct bce_softc *sc)
1713 {
1714 	u32 val;
1715 	int j, rc = 0;
1716 
1717 	DBENTER(BCE_VERBOSE_NVRAM);
1718 
1719 	/* Request access to the flash interface. */
1720 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1721 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1722 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1723 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1724 			break;
1725 
1726 		DELAY(5);
1727 	}
1728 
1729 	if (j >= NVRAM_TIMEOUT_COUNT) {
1730 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1731 		rc = EBUSY;
1732 	}
1733 
1734 	DBEXIT(BCE_VERBOSE_NVRAM);
1735 	return (rc);
1736 }
1737 
1738 
1739 /****************************************************************************/
1740 /* Release NVRAM lock.                                                      */
1741 /*                                                                          */
1742 /* When the caller is finished accessing NVRAM the lock must be released.   */
1743 /* Lock 1 is used by the firmware and lock 2 is reserved for use by the     */
1744 /* driver.                                                                   */
1745 /*                                                                          */
1746 /* Returns:                                                                 */
1747 /*   0 on success, positive value on failure.                               */
1748 /****************************************************************************/
1749 static int
1750 bce_release_nvram_lock(struct bce_softc *sc)
1751 {
1752 	u32 val;
1753 	int j, rc = 0;
1754 
1755 	DBENTER(BCE_VERBOSE_NVRAM);
1756 
1757 	/*
1758 	 * Relinquish nvram interface.
1759 	 */
1760 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1761 
1762 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1763 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1764 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1765 			break;
1766 
1767 		DELAY(5);
1768 	}
1769 
1770 	if (j >= NVRAM_TIMEOUT_COUNT) {
1771 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1772 		rc = EBUSY;
1773 	}
1774 
1775 	DBEXIT(BCE_VERBOSE_NVRAM);
1776 	return (rc);
1777 }
1778 
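/*
 * Illustrative sketch (not built): every NVRAM access in this file uses
 * the same bracket -- acquire arbitration lock 2, enable the flash
 * interface, do the work, then undo both in reverse order.
 * bce_nvram_read() below is the real user of this pattern; the body
 * here is intentionally empty.
 */
#if 0
static int
bce_nvram_bracket_example(struct bce_softc *sc)
{
	int rc;

	/* Acquire software arbitration lock 2 for the flash interface. */
	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
		return (rc);

	/* Enable access to the flash interface. */
	bce_enable_nvram_access(sc);

	/* ... NVRAM reads and/or writes would go here ... */

	/* Tear down in the reverse order. */
	bce_disable_nvram_access(sc);
	rc = bce_release_nvram_lock(sc);

	return (rc);
}
#endif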
1779 
1780 #ifdef BCE_NVRAM_WRITE_SUPPORT
1781 /****************************************************************************/
1782 /* Enable NVRAM write access.                                               */
1783 /*                                                                          */
1784 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1785 /*                                                                          */
1786 /* Returns:                                                                 */
1787 /*   0 on success, positive value on failure.                               */
1788 /****************************************************************************/
1789 static int
1790 bce_enable_nvram_write(struct bce_softc *sc)
1791 {
1792 	u32 val;
1793 	int rc = 0;
1794 
1795 	DBENTER(BCE_VERBOSE_NVRAM);
1796 
1797 	val = REG_RD(sc, BCE_MISC_CFG);
1798 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1799 
1800 	if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
1801 		int j;
1802 
1803 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1804 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1805 
1806 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1807 			DELAY(5);
1808 
1809 			val = REG_RD(sc, BCE_NVM_COMMAND);
1810 			if (val & BCE_NVM_COMMAND_DONE)
1811 				break;
1812 		}
1813 
1814 		if (j >= NVRAM_TIMEOUT_COUNT) {
1815 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1816 			rc = EBUSY;
1817 		}
1818 	}
1819 
1820 	DBEXIT(BCE_VERBOSE_NVRAM);
1821 	return (rc);
1822 }
1823 
1824 
1825 /****************************************************************************/
1826 /* Disable NVRAM write access.                                              */
1827 /*                                                                          */
1828 /* When the caller is finished writing to NVRAM write access must be        */
1829 /* disabled.                                                                */
1830 /*                                                                          */
1831 /* Returns:                                                                 */
1832 /*   Nothing.                                                               */
1833 /****************************************************************************/
1834 static void
1835 bce_disable_nvram_write(struct bce_softc *sc)
1836 {
1837 	u32 val;
1838 
1839 	DBENTER(BCE_VERBOSE_NVRAM);
1840 
1841 	val = REG_RD(sc, BCE_MISC_CFG);
1842 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1843 
1844 	DBEXIT(BCE_VERBOSE_NVRAM);
1845 
1846 }
1847 #endif
1848 
1849 
1850 /****************************************************************************/
1851 /* Enable NVRAM access.                                                     */
1852 /*                                                                          */
1853 /* Before accessing NVRAM for read or write operations the caller must      */
1854 /* enable NVRAM access.                                                     */
1855 /*                                                                          */
1856 /* Returns:                                                                 */
1857 /*   Nothing.                                                               */
1858 /****************************************************************************/
1859 static void
1860 bce_enable_nvram_access(struct bce_softc *sc)
1861 {
1862 	u32 val;
1863 
1864 	DBENTER(BCE_VERBOSE_NVRAM);
1865 
1866 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1867 	/* Enable both bits, even on read. */
1868 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1869 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1870 
1871 	DBEXIT(BCE_VERBOSE_NVRAM);
1872 }
1873 
1874 
1875 /****************************************************************************/
1876 /* Disable NVRAM access.                                                    */
1877 /*                                                                          */
1878 /* When the caller is finished accessing NVRAM access must be disabled.     */
1879 /*                                                                          */
1880 /* Returns:                                                                 */
1881 /*   Nothing.                                                               */
1882 /****************************************************************************/
1883 static void
1884 bce_disable_nvram_access(struct bce_softc *sc)
1885 {
1886 	u32 val;
1887 
1888 	DBENTER(BCE_VERBOSE_NVRAM);
1889 
1890 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1891 
1892 	/* Disable both bits, even after read. */
1893 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1894 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1895 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1896 
1897 	DBEXIT(BCE_VERBOSE_NVRAM);
1898 }
1899 
1900 
1901 #ifdef BCE_NVRAM_WRITE_SUPPORT
1902 /****************************************************************************/
1903 /* Erase NVRAM page before writing.                                         */
1904 /*                                                                          */
1905 /* Non-buffered flash parts require that a page be erased before it is      */
1906 /* written.                                                                 */
1907 /*                                                                          */
1908 /* Returns:                                                                 */
1909 /*   0 on success, positive value on failure.                               */
1910 /****************************************************************************/
1911 static int
1912 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1913 {
1914 	u32 cmd;
1915 	int j, rc = 0;
1916 
1917 	DBENTER(BCE_VERBOSE_NVRAM);
1918 
1919 	/* Buffered flash doesn't require an erase. */
1920 	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
1921 		goto bce_nvram_erase_page_exit;
1922 
1923 	/* Build an erase command. */
1924 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1925 	      BCE_NVM_COMMAND_DOIT;
1926 
1927 	/*
1928 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1929 	 * and issue the erase command.
1930 	 */
1931 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1932 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1933 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1934 
1935 	/* Wait for completion. */
1936 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1937 		u32 val;
1938 
1939 		DELAY(5);
1940 
1941 		val = REG_RD(sc, BCE_NVM_COMMAND);
1942 		if (val & BCE_NVM_COMMAND_DONE)
1943 			break;
1944 	}
1945 
1946 	if (j >= NVRAM_TIMEOUT_COUNT) {
1947 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1948 		rc = EBUSY;
1949 	}
1950 
1951 bce_nvram_erase_page_exit:
1952 	DBEXIT(BCE_VERBOSE_NVRAM);
1953 	return (rc);
1954 }
1955 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1956 
1957 
1958 /****************************************************************************/
1959 /* Read a dword (32 bits) from NVRAM.                                       */
1960 /*                                                                          */
1961 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1962 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1963 /*                                                                          */
1964 /* Returns:                                                                 */
1965 /*   0 on success and the 32 bit value read, positive value on failure.     */
1966 /****************************************************************************/
1967 static int
1968 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1969 							u32 cmd_flags)
1970 {
1971 	u32 cmd;
1972 	int i, rc = 0;
1973 
1974 	DBENTER(BCE_EXTREME_NVRAM);
1975 
1976 	/* Build the command word. */
1977 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1978 
1979 	/* Calculate the offset for buffered flash if translation is used. */
1980 	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1981 		offset = ((offset / sc->bce_flash_info->page_size) <<
1982 			   sc->bce_flash_info->page_bits) +
1983 			  (offset % sc->bce_flash_info->page_size);
1984 	}
1985 
1986 	/*
1987 	 * Clear the DONE bit separately, set the address to read,
1988 	 * and issue the read.
1989 	 */
1990 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1991 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1992 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1993 
1994 	/* Wait for completion. */
1995 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1996 		u32 val;
1997 
1998 		DELAY(5);
1999 
2000 		val = REG_RD(sc, BCE_NVM_COMMAND);
2001 		if (val & BCE_NVM_COMMAND_DONE) {
2002 			val = REG_RD(sc, BCE_NVM_READ);
2003 
2004 			val = bce_be32toh(val);
2005 			memcpy(ret_val, &val, 4);
2006 			break;
2007 		}
2008 	}
2009 
2010 	/* Check for errors. */
2011 	if (i >= NVRAM_TIMEOUT_COUNT) {
2012 		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
2013 			__FILE__, __LINE__, offset);
2014 		rc = EBUSY;
2015 	}
2016 
2017 	DBEXIT(BCE_EXTREME_NVRAM);
2018 	return(rc);
2019 }
2020 
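/*
 * Illustrative sketch (not built): the offset translation performed
 * above for BCE_NV_TRANSLATE parts maps a linear byte offset onto
 * page/byte addressing.  Assuming the common buffered-flash geometry of
 * 264 byte pages addressed on a 512 byte (page_bits = 9) boundary, a
 * linear offset of 0x200 (page 1, byte 248) becomes
 * (1 << 9) + 248 = 0x2F8.  The helper below simply restates the same
 * arithmetic as bce_nvram_read_dword().
 */
#if 0
static u32
bce_nvram_translate_offset_example(struct bce_softc *sc, u32 offset)
{
	return (((offset / sc->bce_flash_info->page_size) <<
	    sc->bce_flash_info->page_bits) +
	    (offset % sc->bce_flash_info->page_size));
}
#endif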
2021 
2022 #ifdef BCE_NVRAM_WRITE_SUPPORT
2023 /****************************************************************************/
2024 /* Write a dword (32 bits) to NVRAM.                                        */
2025 /*                                                                          */
2026 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
2027 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
2028 /* enabled NVRAM write access.                                              */
2029 /*                                                                          */
2030 /* Returns:                                                                 */
2031 /*   0 on success, positive value on failure.                               */
2032 /****************************************************************************/
2033 static int
2034 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
2035 	u32 cmd_flags)
2036 {
2037 	u32 cmd, val32;
2038 	int j, rc = 0;
2039 
2040 	DBENTER(BCE_VERBOSE_NVRAM);
2041 
2042 	/* Build the command word. */
2043 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
2044 
2045 	/* Calculate the offset for buffered flash if translation is used. */
2046 	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2047 		offset = ((offset / sc->bce_flash_info->page_size) <<
2048 			  sc->bce_flash_info->page_bits) +
2049 			 (offset % sc->bce_flash_info->page_size);
2050 	}
2051 
2052 	/*
2053 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
2054 	 * set the NVRAM address to write, and issue the write command
2055 	 */
2056 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2057 	memcpy(&val32, val, 4);
2058 	val32 = htobe32(val32);
2059 	REG_WR(sc, BCE_NVM_WRITE, val32);
2060 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2061 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
2062 
2063 	/* Wait for completion. */
2064 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2065 		DELAY(5);
2066 
2067 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
2068 			break;
2069 	}
2070 	if (j >= NVRAM_TIMEOUT_COUNT) {
2071 		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
2072 			__FILE__, __LINE__, offset);
2073 		rc = EBUSY;
2074 	}
2075 
2076 	DBEXIT(BCE_VERBOSE_NVRAM);
2077 	return (rc);
2078 }
2079 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2080 
2081 
2082 /****************************************************************************/
2083 /* Initialize NVRAM access.                                                 */
2084 /*                                                                          */
2085 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
2086 /* access that device.                                                      */
2087 /*                                                                          */
2088 /* Returns:                                                                 */
2089 /*   0 on success, positive value on failure.                               */
2090 /****************************************************************************/
2091 static int
2092 bce_init_nvram(struct bce_softc *sc)
2093 {
2094 	u32 val;
2095 	int j, entry_count, rc = 0;
2096 	struct flash_spec *flash;
2097 
2098 	DBENTER(BCE_VERBOSE_NVRAM);
2099 
2100 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2101 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2102 		sc->bce_flash_info = &flash_5709;
2103 		goto bce_init_nvram_get_flash_size;
2104 	}
2105 
2106 	/* Determine the selected interface. */
2107 	val = REG_RD(sc, BCE_NVM_CFG1);
2108 
2109 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2110 
2111 	/*
2112 	 * Flash reconfiguration is required to support additional
2113 	 * NVRAM devices not directly supported in hardware.
2114 	 * Check if the flash interface was reconfigured
2115 	 * by the bootcode.
2116 	 */
2117 
2118 	if (val & 0x40000000) {
2119 		/* Flash interface reconfigured by bootcode. */
2120 
2121 		DBPRINT(sc,BCE_INFO_LOAD,
2122 			"bce_init_nvram(): Flash WAS reconfigured.\n");
2123 
2124 		for (j = 0, flash = &flash_table[0]; j < entry_count;
2125 		     j++, flash++) {
2126 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
2127 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2128 				sc->bce_flash_info = flash;
2129 				break;
2130 			}
2131 		}
2132 	} else {
2133 		/* Flash interface not yet reconfigured. */
2134 		u32 mask;
2135 
2136 		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2137 			__FUNCTION__);
2138 
2139 		if (val & (1 << 23))
2140 			mask = FLASH_BACKUP_STRAP_MASK;
2141 		else
2142 			mask = FLASH_STRAP_MASK;
2143 
2144 		/* Look for the matching NVRAM device configuration data. */
2145 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2146 
2147 			/* Check if the device matches any of the known devices. */
2148 			if ((val & mask) == (flash->strapping & mask)) {
2149 				/* Found a device match. */
2150 				sc->bce_flash_info = flash;
2151 
2152 				/* Request access to the flash interface. */
2153 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2154 					return rc;
2155 
2156 				/* Reconfigure the flash interface. */
2157 				bce_enable_nvram_access(sc);
2158 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2159 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2160 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2161 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2162 				bce_disable_nvram_access(sc);
2163 				bce_release_nvram_lock(sc);
2164 
2165 				break;
2166 			}
2167 		}
2168 	}
2169 
2170 	/* Check if a matching device was found. */
2171 	if (j == entry_count) {
2172 		sc->bce_flash_info = NULL;
2173 		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2174 			__FILE__, __LINE__);
2175 		DBEXIT(BCE_VERBOSE_NVRAM);
		return (ENODEV);
2176 	}
2177 
2178 bce_init_nvram_get_flash_size:
2179 	/* Write the flash config data to the shared memory interface. */
2180 	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
2181 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2182 	if (val)
2183 		sc->bce_flash_size = val;
2184 	else
2185 		sc->bce_flash_size = sc->bce_flash_info->total_size;
2186 
2187 	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2188 		__FUNCTION__, sc->bce_flash_info->name,
2189 		sc->bce_flash_info->total_size);
2190 
2191 	DBEXIT(BCE_VERBOSE_NVRAM);
2192 	return rc;
2193 }
2194 
2195 
2196 /****************************************************************************/
2197 /* Read an arbitrary range of data from NVRAM.                              */
2198 /*                                                                          */
2199 /* Prepares the NVRAM interface for access and reads the requested data     */
2200 /* into the supplied buffer.                                                */
2201 /*                                                                          */
2202 /* Returns:                                                                 */
2203 /*   0 on success and the data read, positive value on failure.             */
2204 /****************************************************************************/
2205 static int
2206 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2207 	int buf_size)
2208 {
2209 	int rc = 0;
2210 	u32 cmd_flags, offset32, len32, extra;
2211 
2212 	DBENTER(BCE_VERBOSE_NVRAM);
2213 
2214 	if (buf_size == 0)
2215 		goto bce_nvram_read_exit;
2216 
2217 	/* Request access to the flash interface. */
2218 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2219 		goto bce_nvram_read_exit;
2220 
2221 	/* Enable access to flash interface */
2222 	bce_enable_nvram_access(sc);
2223 
2224 	len32 = buf_size;
2225 	offset32 = offset;
2226 	extra = 0;
2227 
2228 	cmd_flags = 0;
2229 
2230 	if (offset32 & 3) {
2231 		u8 buf[4];
2232 		u32 pre_len;
2233 
2234 		offset32 &= ~3;
2235 		pre_len = 4 - (offset & 3);
2236 
2237 		if (pre_len >= len32) {
2238 			pre_len = len32;
2239 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2240 		}
2241 		else {
2242 			cmd_flags = BCE_NVM_COMMAND_FIRST;
2243 		}
2244 
2245 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2246 
2247 		if (rc)
2248 			goto bce_nvram_read_locked_exit;
2249 
2250 		memcpy(ret_buf, buf + (offset & 3), pre_len);
2251 
2252 		offset32 += 4;
2253 		ret_buf += pre_len;
2254 		len32 -= pre_len;
2255 	}
2256 
2257 	if (len32 & 3) {
2258 		extra = 4 - (len32 & 3);
2259 		len32 = (len32 + 4) & ~3;
2260 	}
2261 
2262 	if (len32 == 4) {
2263 		u8 buf[4];
2264 
2265 		if (cmd_flags)
2266 			cmd_flags = BCE_NVM_COMMAND_LAST;
2267 		else
2268 			cmd_flags = BCE_NVM_COMMAND_FIRST |
2269 				    BCE_NVM_COMMAND_LAST;
2270 
2271 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2272 
2273 		memcpy(ret_buf, buf, 4 - extra);
2274 	}
2275 	else if (len32 > 0) {
2276 		u8 buf[4];
2277 
2278 		/* Read the first word. */
2279 		if (cmd_flags)
2280 			cmd_flags = 0;
2281 		else
2282 			cmd_flags = BCE_NVM_COMMAND_FIRST;
2283 
2284 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2285 
2286 		/* Advance to the next dword. */
2287 		offset32 += 4;
2288 		ret_buf += 4;
2289 		len32 -= 4;
2290 
2291 		while (len32 > 4 && rc == 0) {
2292 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2293 
2294 			/* Advance to the next dword. */
2295 			offset32 += 4;
2296 			ret_buf += 4;
2297 			len32 -= 4;
2298 		}
2299 
2300 		if (rc)
2301 			goto bce_nvram_read_locked_exit;
2302 
2303 		cmd_flags = BCE_NVM_COMMAND_LAST;
2304 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2305 
2306 		memcpy(ret_buf, buf, 4 - extra);
2307 	}
2308 
2309 bce_nvram_read_locked_exit:
2310 	/* Disable access to flash interface and release the lock. */
2311 	bce_disable_nvram_access(sc);
2312 	bce_release_nvram_lock(sc);
2313 
2314 bce_nvram_read_exit:
2315 	DBEXIT(BCE_VERBOSE_NVRAM);
2316 	return rc;
2317 }
2318 
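/*
 * Illustrative sketch (not built): bce_nvram_read() hides the dword
 * alignment handling above, so callers may request arbitrary byte
 * ranges.  The offset and length below are placeholders chosen only to
 * exercise both the unaligned start and the unaligned end paths.
 */
#if 0
static int
bce_nvram_read_example(struct bce_softc *sc)
{
	u8 buf[5];

	/* Start two bytes into a dword and stop short of a boundary. */
	return (bce_nvram_read(sc, 0x102, buf, sizeof(buf)));
}
#endif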
2319 
2320 #ifdef BCE_NVRAM_WRITE_SUPPORT
2321 /****************************************************************************/
2322 /* Write an arbitrary range of data to NVRAM.                               */
2323 /*                                                                          */
2324 /* Prepares the NVRAM interface for write access and writes the requested   */
2325 /* data from the supplied buffer.  The caller is responsible for            */
2326 /* calculating any appropriate CRCs.                                        */
2327 /*                                                                          */
2328 /* Returns:                                                                 */
2329 /*   0 on success, positive value on failure.                               */
2330 /****************************************************************************/
2331 static int
2332 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2333 	int buf_size)
2334 {
2335 	u32 written, offset32, len32;
2336 	u8 *buf, start[4], end[4];
2337 	int rc = 0;
2338 	int align_start, align_end;
2339 
2340 	DBENTER(BCE_VERBOSE_NVRAM);
2341 
2342 	buf = data_buf;
2343 	offset32 = offset;
2344 	len32 = buf_size;
2345 	align_start = align_end = 0;
2346 
2347 	if ((align_start = (offset32 & 3))) {
2348 		offset32 &= ~3;
2349 		len32 += align_start;
2350 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2351 			goto bce_nvram_write_exit;
2352 	}
2353 
2354 	if (len32 & 3) {
2355 	       	if ((len32 > 4) || !align_start) {
2356 			align_end = 4 - (len32 & 3);
2357 			len32 += align_end;
2358 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2359 				end, 4))) {
2360 				goto bce_nvram_write_exit;
2361 			}
2362 		}
2363 	}
2364 
2365 	if (align_start || align_end) {
2366 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2367 		if (buf == NULL) {
2368 			rc = ENOMEM;
2369 			goto bce_nvram_write_exit;
2370 		}
2371 
2372 		if (align_start) {
2373 			memcpy(buf, start, 4);
2374 		}
2375 
2376 		if (align_end) {
2377 			memcpy(buf + len32 - 4, end, 4);
2378 		}
2379 		memcpy(buf + align_start, data_buf, buf_size);
2380 	}
2381 
2382 	written = 0;
2383 	while ((written < len32) && (rc == 0)) {
2384 		u32 page_start, page_end, data_start, data_end;
2385 		u32 addr, cmd_flags;
2386 		int i;
2387 		u8 flash_buffer[264];
2388 
2389 	    /* Find the page_start addr */
2390 		page_start = offset32 + written;
2391 		page_start -= (page_start % sc->bce_flash_info->page_size);
2392 		/* Find the page_end addr */
2393 		page_end = page_start + sc->bce_flash_info->page_size;
2394 		/* Find the data_start addr */
2395 		data_start = (written == 0) ? offset32 : page_start;
2396 		/* Find the data_end addr */
2397 		data_end = (page_end > offset32 + len32) ?
2398 			(offset32 + len32) : page_end;
2399 
2400 		/* Request access to the flash interface. */
2401 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2402 			goto bce_nvram_write_exit;
2403 
2404 		/* Enable access to flash interface */
2405 		bce_enable_nvram_access(sc);
2406 
2407 		cmd_flags = BCE_NVM_COMMAND_FIRST;
2408 		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2409 			int j;
2410 
2411 			/* Read the whole page into the buffer
2412 			 * (non-buffered flash only) */
2413 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2414 				if (j == (sc->bce_flash_info->page_size - 4)) {
2415 					cmd_flags |= BCE_NVM_COMMAND_LAST;
2416 				}
2417 				rc = bce_nvram_read_dword(sc,
2418 					page_start + j,
2419 					&flash_buffer[j],
2420 					cmd_flags);
2421 
2422 				if (rc)
2423 					goto bce_nvram_write_locked_exit;
2424 
2425 				cmd_flags = 0;
2426 			}
2427 		}
2428 
2429 		/* Enable writes to flash interface (unlock write-protect) */
2430 		if ((rc = bce_enable_nvram_write(sc)) != 0)
2431 			goto bce_nvram_write_locked_exit;
2432 
2433 		/* Erase the page */
2434 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2435 			goto bce_nvram_write_locked_exit;
2436 
2437 		/* Re-enable the write again for the actual write */
2438 		bce_enable_nvram_write(sc);
2439 
2440 		/* Loop to write back the buffer data from page_start to
2441 		 * data_start */
2442 		i = 0;
2443 		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2444 			for (addr = page_start; addr < data_start;
2445 				addr += 4, i += 4) {
2446 
2447 				rc = bce_nvram_write_dword(sc, addr,
2448 					&flash_buffer[i], cmd_flags);
2449 
2450 				if (rc != 0)
2451 					goto bce_nvram_write_locked_exit;
2452 
2453 				cmd_flags = 0;
2454 			}
2455 		}
2456 
2457 		/* Loop to write the new data from data_start to data_end */
2458 		for (addr = data_start; addr < data_end; addr += 4, i++) {
2459 			if ((addr == page_end - 4) ||
2460 				((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2461 				(addr == data_end - 4))) {
2462 
2463 				cmd_flags |= BCE_NVM_COMMAND_LAST;
2464 			}
2465 			rc = bce_nvram_write_dword(sc, addr, buf,
2466 				cmd_flags);
2467 
2468 			if (rc != 0)
2469 				goto bce_nvram_write_locked_exit;
2470 
2471 			cmd_flags = 0;
2472 			buf += 4;
2473 		}
2474 
2475 		/* Loop to write back the buffer data from data_end
2476 		 * to page_end */
2477 		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2478 			for (addr = data_end; addr < page_end;
2479 				addr += 4, i += 4) {
2480 
2481 				if (addr == page_end-4) {
2482 					cmd_flags = BCE_NVM_COMMAND_LAST;
2483 				}
2484 				rc = bce_nvram_write_dword(sc, addr,
2485 					&flash_buffer[i], cmd_flags);
2486 
2487 				if (rc != 0)
2488 					goto bce_nvram_write_locked_exit;
2489 
2490 				cmd_flags = 0;
2491 			}
2492 		}
2493 
2494 		/* Disable writes to flash interface (lock write-protect) */
2495 		bce_disable_nvram_write(sc);
2496 
2497 		/* Disable access to flash interface */
2498 		bce_disable_nvram_access(sc);
2499 		bce_release_nvram_lock(sc);
2500 
2501 		/* Increment written */
2502 		written += data_end - data_start;
2503 	}
2504 
2505 	goto bce_nvram_write_exit;
2506 
2507 bce_nvram_write_locked_exit:
2508 		bce_disable_nvram_write(sc);
2509 		bce_disable_nvram_access(sc);
2510 		bce_release_nvram_lock(sc);
2511 
2512 bce_nvram_write_exit:
2513 	if (align_start || align_end)
2514 		free(buf, M_DEVBUF);
2515 
2516 	DBEXIT(BCE_VERBOSE_NVRAM);
2517 	return (rc);
2518 }
2519 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2520 
2521 
2522 /****************************************************************************/
2523 /* Verifies that NVRAM is accessible and contains valid data.               */
2524 /*                                                                          */
2525 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2526 /* correct.                                                                 */
2527 /*                                                                          */
2528 /* Returns:                                                                 */
2529 /*   0 on success, positive value on failure.                               */
2530 /****************************************************************************/
2531 static int
2532 bce_nvram_test(struct bce_softc *sc)
2533 {
2534 	u32 buf[BCE_NVRAM_SIZE / 4];
2535 	u8 *data = (u8 *) buf;
2536 	int rc = 0;
2537 	u32 magic, csum;
2538 
2539 	DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2540 
2541 	/*
2542 	 * Check that the device NVRAM is valid by reading
2543 	 * the magic value at offset 0.
2544 	 */
2545 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2546 		BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", __FILE__, __LINE__);
2547 		goto bce_nvram_test_exit;
2548 	}
2549 
2550 	/*
2551 	 * Verify that offset 0 of the NVRAM contains
2552 	 * a valid magic number.
2553 	 */
2554 	magic = bce_be32toh(buf[0]);
2555 	if (magic != BCE_NVRAM_MAGIC) {
2556 		rc = ENODEV;
2557 		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2558 			"Found: 0x%08X\n",
2559 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2560 		goto bce_nvram_test_exit;
2561 	}
2562 
2563 	/*
2564 	 * Verify that the device NVRAM includes valid
2565 	 * configuration data.
2566 	 */
2567 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2568 		BCE_PRINTF("%s(%d): Unable to read Manufacturing Information from "
2569 			"NVRAM!\n", __FILE__, __LINE__);
2570 		goto bce_nvram_test_exit;
2571 	}
2572 
2573 	csum = ether_crc32_le(data, 0x100);
2574 	if (csum != BCE_CRC32_RESIDUAL) {
2575 		rc = ENODEV;
2576 		BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2577 			"Expected: 0x%08X, Found: 0x%08X\n",
2578 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2579 		goto bce_nvram_test_exit;
2580 	}
2581 
2582 	csum = ether_crc32_le(data + 0x100, 0x100);
2583 	if (csum != BCE_CRC32_RESIDUAL) {
2584 		rc = ENODEV;
2585 		BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2586 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2587 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2588 	}
2589 
2590 bce_nvram_test_exit:
2591 	DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2592 	return rc;
2593 }
2594 
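/*
 * Illustrative sketch (not built): the checks above rely on the CRC-32
 * residual property -- when a 0x100 byte block carries its own CRC in
 * its final bytes, running ether_crc32_le() over the entire block (data
 * plus stored CRC) yields the fixed value BCE_CRC32_RESIDUAL, so no
 * explicit comparison against the stored CRC is needed.
 */
#if 0
static int
bce_nvram_block_csum_ok_example(const u8 *block)
{
	/* Non-zero when a self-checking 0x100 byte block is intact. */
	return (ether_crc32_le(block, 0x100) == BCE_CRC32_RESIDUAL);
}
#endif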
2595 
2596 /****************************************************************************/
2597 /* Identifies the current media type of the controller and sets the PHY     */
2598 /* address.                                                                 */
2599 /*                                                                          */
2600 /* Returns:                                                                 */
2601 /*   Nothing.                                                               */
2602 /****************************************************************************/
2603 static void
2604 bce_get_media(struct bce_softc *sc)
2605 {
2606 	u32 val;
2607 
2608 	DBENTER(BCE_VERBOSE);
2609 
2610 	/* Assume PHY address for copper controllers. */
2611 	sc->bce_phy_addr = 1;
2612 
2613 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2614  		u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
2615 		u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2616 		u32 strap;
2617 
2618 		/*
2619 		 * The BCM5709S is software configurable
2620 		 * for Copper or SerDes operation.
2621 		 */
2622 		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2623 			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for copper.\n");
2624 			goto bce_get_media_exit;
2625 		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2626 			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for dual media.\n");
2627 			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2628 			goto bce_get_media_exit;
2629 		}
2630 
2631 		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2632 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2633 		else
2634 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
2635 
2636 		if (pci_get_function(sc->bce_dev) == 0) {
2637 			switch (strap) {
2638 			case 0x4:
2639 			case 0x5:
2640 			case 0x6:
2641 				DBPRINT(sc, BCE_INFO_LOAD,
2642 					"BCM5709 s/w configured for SerDes.\n");
2643 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
2644 			default:
2645 				DBPRINT(sc, BCE_INFO_LOAD,
2646 					"BCM5709 s/w configured for Copper.\n");
2647 			}
2648 		} else {
2649 			switch (strap) {
2650 			case 0x1:
2651 			case 0x2:
2652 			case 0x4:
2653 				DBPRINT(sc, BCE_INFO_LOAD,
2654 					"BCM5709 s/w configured for SerDes.\n");
2655 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
2656 			default:
2657 				DBPRINT(sc, BCE_INFO_LOAD,
2658 					"BCM5709 s/w configured for Copper.\n");
2659 			}
2660 		}
2661 
2662 	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
2663 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2664 
2665 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
2666 		sc->bce_flags |= BCE_NO_WOL_FLAG;
2667 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2668 			sc->bce_phy_addr = 2;
2669 			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
2670 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
2671 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
2672 				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
2673 			}
2674 		}
2675 	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
2676 		   (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
2677 		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
2678 
2679 bce_get_media_exit:
2680 	DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
2681 		"Using PHY address %d.\n", sc->bce_phy_addr);
2682 
2683 	DBEXIT(BCE_VERBOSE);
2684 }
2685 
2686 
2687 /****************************************************************************/
2688 /* Free any DMA memory owned by the driver.                                 */
2689 /*                                                                          */
2690 /* Scans through each data structure that requires DMA memory and frees     */
2691 /* the memory if allocated.                                                 */
2692 /*                                                                          */
2693 /* Returns:                                                                 */
2694 /*   Nothing.                                                               */
2695 /****************************************************************************/
2696 static void
2697 bce_dma_free(struct bce_softc *sc)
2698 {
2699 	int i;
2700 
2701 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2702 
2703 	/* Free, unmap, and destroy the status block. */
2704 	if (sc->status_block != NULL) {
2705 		bus_dmamem_free(
2706 			sc->status_tag,
2707 		    sc->status_block,
2708 		    sc->status_map);
2709 		sc->status_block = NULL;
2710 	}
2711 
2712 	if (sc->status_map != NULL) {
2713 		bus_dmamap_unload(
2714 			sc->status_tag,
2715 		    sc->status_map);
2716 		bus_dmamap_destroy(sc->status_tag,
2717 		    sc->status_map);
2718 		sc->status_map = NULL;
2719 	}
2720 
2721 	if (sc->status_tag != NULL) {
2722 		bus_dma_tag_destroy(sc->status_tag);
2723 		sc->status_tag = NULL;
2724 	}
2725 
2726 
2727 	/* Free, unmap, and destroy the statistics block. */
2728 	if (sc->stats_block != NULL) {
2729 		bus_dmamem_free(
2730 			sc->stats_tag,
2731 		    sc->stats_block,
2732 		    sc->stats_map);
2733 		sc->stats_block = NULL;
2734 	}
2735 
2736 	if (sc->stats_map != NULL) {
2737 		bus_dmamap_unload(
2738 			sc->stats_tag,
2739 		    sc->stats_map);
2740 		bus_dmamap_destroy(sc->stats_tag,
2741 		    sc->stats_map);
2742 		sc->stats_map = NULL;
2743 	}
2744 
2745 	if (sc->stats_tag != NULL) {
2746 		bus_dma_tag_destroy(sc->stats_tag);
2747 		sc->stats_tag = NULL;
2748 	}
2749 
2750 
2751 	/* Free, unmap and destroy all context memory pages. */
2752 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2753 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2754 		for (i = 0; i < sc->ctx_pages; i++ ) {
2755 			if (sc->ctx_block[i] != NULL) {
2756 				bus_dmamem_free(
2757 					sc->ctx_tag,
2758 				    sc->ctx_block[i],
2759 				    sc->ctx_map[i]);
2760 				sc->ctx_block[i] = NULL;
2761 			}
2762 
2763 			if (sc->ctx_map[i] != NULL) {
2764 				bus_dmamap_unload(
2765 					sc->ctx_tag,
2766 		    		sc->ctx_map[i]);
2767 				bus_dmamap_destroy(
2768 					sc->ctx_tag,
2769 				    sc->ctx_map[i]);
2770 				sc->ctx_map[i] = NULL;
2771 			}
2772 		}
2773 
2774 		/* Destroy the context memory tag. */
2775 		if (sc->ctx_tag != NULL) {
2776 			bus_dma_tag_destroy(sc->ctx_tag);
2777 			sc->ctx_tag = NULL;
2778 		}
2779 	}
2780 
2781 
2782 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2783 	for (i = 0; i < TX_PAGES; i++ ) {
2784 		if (sc->tx_bd_chain[i] != NULL) {
2785 			bus_dmamem_free(
2786 				sc->tx_bd_chain_tag,
2787 			    sc->tx_bd_chain[i],
2788 			    sc->tx_bd_chain_map[i]);
2789 			sc->tx_bd_chain[i] = NULL;
2790 		}
2791 
2792 		if (sc->tx_bd_chain_map[i] != NULL) {
2793 			bus_dmamap_unload(
2794 				sc->tx_bd_chain_tag,
2795 		    	sc->tx_bd_chain_map[i]);
2796 			bus_dmamap_destroy(
2797 				sc->tx_bd_chain_tag,
2798 			    sc->tx_bd_chain_map[i]);
2799 			sc->tx_bd_chain_map[i] = NULL;
2800 		}
2801 	}
2802 
2803 	/* Destroy the TX buffer descriptor tag. */
2804 	if (sc->tx_bd_chain_tag != NULL) {
2805 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2806 		sc->tx_bd_chain_tag = NULL;
2807 	}
2808 
2809 
2810 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2811 	for (i = 0; i < RX_PAGES; i++ ) {
2812 		if (sc->rx_bd_chain[i] != NULL) {
2813 			bus_dmamem_free(
2814 				sc->rx_bd_chain_tag,
2815 			    sc->rx_bd_chain[i],
2816 			    sc->rx_bd_chain_map[i]);
2817 			sc->rx_bd_chain[i] = NULL;
2818 		}
2819 
2820 		if (sc->rx_bd_chain_map[i] != NULL) {
2821 			bus_dmamap_unload(
2822 				sc->rx_bd_chain_tag,
2823 		    	sc->rx_bd_chain_map[i]);
2824 			bus_dmamap_destroy(
2825 				sc->rx_bd_chain_tag,
2826 			    sc->rx_bd_chain_map[i]);
2827 			sc->rx_bd_chain_map[i] = NULL;
2828 		}
2829 	}
2830 
2831 	/* Destroy the RX buffer descriptor tag. */
2832 	if (sc->rx_bd_chain_tag != NULL) {
2833 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2834 		sc->rx_bd_chain_tag = NULL;
2835 	}
2836 
2837 
2838 #ifdef BCE_JUMBO_HDRSPLIT
2839 	/* Free, unmap and destroy all page buffer descriptor chain pages. */
2840 	for (i = 0; i < PG_PAGES; i++ ) {
2841 		if (sc->pg_bd_chain[i] != NULL) {
2842 			bus_dmamem_free(
2843 				sc->pg_bd_chain_tag,
2844 			    sc->pg_bd_chain[i],
2845 			    sc->pg_bd_chain_map[i]);
2846 			sc->pg_bd_chain[i] = NULL;
2847 		}
2848 
2849 		if (sc->pg_bd_chain_map[i] != NULL) {
2850 			bus_dmamap_unload(
2851 				sc->pg_bd_chain_tag,
2852 		    	sc->pg_bd_chain_map[i]);
2853 			bus_dmamap_destroy(
2854 				sc->pg_bd_chain_tag,
2855 			    sc->pg_bd_chain_map[i]);
2856 			sc->pg_bd_chain_map[i] = NULL;
2857 		}
2858 	}
2859 
2860 	/* Destroy the page buffer descriptor tag. */
2861 	if (sc->pg_bd_chain_tag != NULL) {
2862 		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2863 		sc->pg_bd_chain_tag = NULL;
2864 	}
2865 #endif
2866 
2867 
2868 	/* Unload and destroy the TX mbuf maps. */
2869 	for (i = 0; i < TOTAL_TX_BD; i++) {
2870 		if (sc->tx_mbuf_map[i] != NULL) {
2871 			bus_dmamap_unload(sc->tx_mbuf_tag,
2872 				sc->tx_mbuf_map[i]);
2873 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2874 	 			sc->tx_mbuf_map[i]);
2875 			sc->tx_mbuf_map[i] = NULL;
2876 		}
2877 	}
2878 
2879 	/* Destroy the TX mbuf tag. */
2880 	if (sc->tx_mbuf_tag != NULL) {
2881 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2882 		sc->tx_mbuf_tag = NULL;
2883 	}
2884 
2885 	/* Unload and destroy the RX mbuf maps. */
2886 	for (i = 0; i < TOTAL_RX_BD; i++) {
2887 		if (sc->rx_mbuf_map[i] != NULL) {
2888 			bus_dmamap_unload(sc->rx_mbuf_tag,
2889 				sc->rx_mbuf_map[i]);
2890 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2891 	 			sc->rx_mbuf_map[i]);
2892 			sc->rx_mbuf_map[i] = NULL;
2893 		}
2894 	}
2895 
2896 	/* Destroy the RX mbuf tag. */
2897 	if (sc->rx_mbuf_tag != NULL) {
2898 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2899 		sc->rx_mbuf_tag = NULL;
2900 	}
2901 
2902 #ifdef BCE_JUMBO_HDRSPLIT
2903 	/* Unload and destroy the page mbuf maps. */
2904 	for (i = 0; i < TOTAL_PG_BD; i++) {
2905 		if (sc->pg_mbuf_map[i] != NULL) {
2906 			bus_dmamap_unload(sc->pg_mbuf_tag,
2907 				sc->pg_mbuf_map[i]);
2908 			bus_dmamap_destroy(sc->pg_mbuf_tag,
2909 	 			sc->pg_mbuf_map[i]);
2910 			sc->pg_mbuf_map[i] = NULL;
2911 		}
2912 	}
2913 
2914 	/* Destroy the page mbuf tag. */
2915 	if (sc->pg_mbuf_tag != NULL) {
2916 		bus_dma_tag_destroy(sc->pg_mbuf_tag);
2917 		sc->pg_mbuf_tag = NULL;
2918 	}
2919 #endif
2920 
2921 	/* Destroy the parent tag */
2922 	if (sc->parent_tag != NULL) {
2923 		bus_dma_tag_destroy(sc->parent_tag);
2924 		sc->parent_tag = NULL;
2925 	}
2926 
2927 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2928 }
2929 
2930 
2931 /****************************************************************************/
2932 /* Get DMA memory from the OS.                                              */
2933 /*                                                                          */
2934 /* Validates that the OS has provided DMA buffers in response to a          */
2935 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2936 /* When the callback is used the OS will return 0 for the mapping function  */
2937 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2938 /* failures back to the caller.                                             */
2939 /*                                                                          */
2940 /* Returns:                                                                 */
2941 /*   Nothing.                                                               */
2942 /****************************************************************************/
2943 static void
2944 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2945 {
2946 	bus_addr_t *busaddr = arg;
2947 
2948 	/* Simulate a mapping failure. */
2949 	DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control),
2950 		error = ENOMEM);
2951 
2952 	/* Check for an error and signal the caller that an error occurred. */
2953 	if (error) {
2954 		*busaddr = 0;
2955 	} else {
2956 		*busaddr = segs->ds_addr;
2957 	}
2958 
2959 	return;
2960 }
2961 
2962 
2963 /****************************************************************************/
2964 /* Allocate any DMA memory needed by the driver.                            */
2965 /*                                                                          */
2966 /* Allocates DMA memory needed for the various global structures needed by  */
2967 /* hardware.                                                                */
2968 /*                                                                          */
2969 /* Memory alignment requirements:                                           */
2970 /* +-----------------+----------+----------+----------+----------+          */
2971 /* |                 |   5706   |   5708   |   5709   |   5716   |          */
2972 /* +-----------------+----------+----------+----------+----------+          */
2973 /* |Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
2974 /* |Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
2975 /* |RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |          */
2976 /* |PG Buffers       |   none   |   none   |   none   |   none   |          */
2977 /* |TX Buffers       |   none   |   none   |   none   |   none   |          */
2978 /* |Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |          */
2979 /* +-----------------+----------+----------+----------+----------+          */
2980 /*                                                                          */
2981 /* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2982 /*                                                                          */
2983 /* Returns:                                                                 */
2984 /*   0 for success, positive value for failure.                             */
2985 /****************************************************************************/
2986 static int
2987 bce_dma_alloc(device_t dev)
2988 {
2989 	struct bce_softc *sc;
2990 	int i, error, rc = 0;
2991 	bus_size_t max_size, max_seg_size;
2992 	int max_segments;
2993 
2994 	sc = device_get_softc(dev);
2995 
2996 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
2997 
2998 	/*
2999 	 * Allocate the parent bus DMA tag appropriate for PCI.
3000 	 */
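	/*
	 * For reference, the positional arguments passed to
	 * bus_dma_tag_create() throughout this function are: parent tag,
	 * alignment, boundary, lowaddr, highaddr, filter, filterarg,
	 * maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, and
	 * the returned tag pointer.
	 */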
3001 	if (bus_dma_tag_create(NULL,
3002 			1,
3003 			BCE_DMA_BOUNDARY,
3004 			sc->max_bus_addr,
3005 			BUS_SPACE_MAXADDR,
3006 			NULL, NULL,
3007 			MAXBSIZE,
3008 			BUS_SPACE_UNRESTRICTED,
3009 			BUS_SPACE_MAXSIZE_32BIT,
3010 			0,
3011 			NULL, NULL,
3012 			&sc->parent_tag)) {
3013 		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
3014 			__FILE__, __LINE__);
3015 		rc = ENOMEM;
3016 		goto bce_dma_alloc_exit;
3017 	}
3018 
3019 	/*
3020 	 * Create a DMA tag for the status block, allocate and clear the
3021 	 * memory, map the memory into DMA space, and fetch the physical
3022 	 * address of the block.
3023 	 */
3024 	if (bus_dma_tag_create(sc->parent_tag,
3025 	    	BCE_DMA_ALIGN,
3026 	    	BCE_DMA_BOUNDARY,
3027 	    	sc->max_bus_addr,
3028 	    	BUS_SPACE_MAXADDR,
3029 	    	NULL, NULL,
3030 	    	BCE_STATUS_BLK_SZ,
3031 	    	1,
3032 	    	BCE_STATUS_BLK_SZ,
3033 	    	0,
3034 	    	NULL, NULL,
3035 	    	&sc->status_tag)) {
3036 		BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
3037 			__FILE__, __LINE__);
3038 		rc = ENOMEM;
3039 		goto bce_dma_alloc_exit;
3040 	}
3041 
3042 	if (bus_dmamem_alloc(sc->status_tag,
3043 	    	(void **)&sc->status_block,
3044 	    	BUS_DMA_NOWAIT,
3045 	    	&sc->status_map)) {
3046 		BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
3047 			__FILE__, __LINE__);
3048 		rc = ENOMEM;
3049 		goto bce_dma_alloc_exit;
3050 	}
3051 
3052 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
3053 
3054 	error = bus_dmamap_load(sc->status_tag,
3055 	    	sc->status_map,
3056 	    	sc->status_block,
3057 	    	BCE_STATUS_BLK_SZ,
3058 	    	bce_dma_map_addr,
3059 	    	&sc->status_block_paddr,
3060 	    	BUS_DMA_NOWAIT);
3061 
3062 	if (error) {
3063 		BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
3064 			__FILE__, __LINE__);
3065 		rc = ENOMEM;
3066 		goto bce_dma_alloc_exit;
3067 	}
3068 
3069 	DBPRINT(sc, BCE_INFO, "%s(): status_block_paddr = 0x%jX\n",
3070 		__FUNCTION__, (uintmax_t) sc->status_block_paddr);
3071 
3072 	/*
3073 	 * Create a DMA tag for the statistics block, allocate and clear the
3074 	 * memory, map the memory into DMA space, and fetch the physical
3075 	 * address of the block.
3076 	 */
3077 	if (bus_dma_tag_create(sc->parent_tag,
3078 	    	BCE_DMA_ALIGN,
3079 	    	BCE_DMA_BOUNDARY,
3080 	    	sc->max_bus_addr,
3081 	    	BUS_SPACE_MAXADDR,
3082 	    	NULL, NULL,
3083 	    	BCE_STATS_BLK_SZ,
3084 	    	1,
3085 	    	BCE_STATS_BLK_SZ,
3086 	    	0,
3087 	    	NULL, NULL,
3088 	    	&sc->stats_tag)) {
3089 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
3090 			__FILE__, __LINE__);
3091 		rc = ENOMEM;
3092 		goto bce_dma_alloc_exit;
3093 	}
3094 
3095 	if (bus_dmamem_alloc(sc->stats_tag,
3096 	    	(void **)&sc->stats_block,
3097 	    	BUS_DMA_NOWAIT,
3098 	    	&sc->stats_map)) {
3099 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
3100 			__FILE__, __LINE__);
3101 		rc = ENOMEM;
3102 		goto bce_dma_alloc_exit;
3103 	}
3104 
3105 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
3106 
3107 	error = bus_dmamap_load(sc->stats_tag,
3108 	    	sc->stats_map,
3109 	    	sc->stats_block,
3110 	    	BCE_STATS_BLK_SZ,
3111 	    	bce_dma_map_addr,
3112 	    	&sc->stats_block_paddr,
3113 	    	BUS_DMA_NOWAIT);
3114 
3115 	if (error) {
3116 		BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
3117 			__FILE__, __LINE__);
3118 		rc = ENOMEM;
3119 		goto bce_dma_alloc_exit;
3120 	}
3121 
3122 	DBPRINT(sc, BCE_INFO, "%s(): stats_block_paddr = 0x%jX\n",
3123 		__FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3124 
3125 	/* BCM5709 uses host memory as cache for context memory. */
3126 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3127 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3128 		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
3129 		if (sc->ctx_pages == 0)
3130 			sc->ctx_pages = 1;
3131 
3132 		DBRUNIF((sc->ctx_pages > 512),
3133 			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3134 				__FILE__, __LINE__, sc->ctx_pages));
3135 
3136 		/*
3137 		 * Create a DMA tag for the context pages,
3138 		 * allocate and clear the memory, map the
3139 		 * memory into DMA space, and fetch the
3140 		 * physical address of the block.
3141 		 */
3142 		if (bus_dma_tag_create(sc->parent_tag,
3143 			BCM_PAGE_SIZE,
3144 		    BCE_DMA_BOUNDARY,
3145 			sc->max_bus_addr,
3146 			BUS_SPACE_MAXADDR,
3147 			NULL, NULL,
3148 			BCM_PAGE_SIZE,
3149 			1,
3150 			BCM_PAGE_SIZE,
3151 			0,
3152 			NULL, NULL,
3153 			&sc->ctx_tag)) {
3154 			BCE_PRINTF("%s(%d): Could not allocate CTX DMA tag!\n",
3155 				__FILE__, __LINE__);
3156 			rc = ENOMEM;
3157 			goto bce_dma_alloc_exit;
3158 		}
3159 
3160 		for (i = 0; i < sc->ctx_pages; i++) {
3161 
3162 			if (bus_dmamem_alloc(sc->ctx_tag,
3163 		    		(void **)&sc->ctx_block[i],
3164 	    		BUS_DMA_NOWAIT,
3165 		    	&sc->ctx_map[i])) {
3166 				BCE_PRINTF("%s(%d): Could not allocate CTX "
3167 					"DMA memory!\n", __FILE__, __LINE__);
3168 				rc = ENOMEM;
3169 				goto bce_dma_alloc_exit;
3170 			}
3171 
3172 			bzero((char *)sc->ctx_block[i], BCM_PAGE_SIZE);
3173 
3174 			error = bus_dmamap_load(sc->ctx_tag,
3175 	    		sc->ctx_map[i],
3176 	    		sc->ctx_block[i],
3177 		    	BCM_PAGE_SIZE,
3178 		    	bce_dma_map_addr,
3179 	    		&sc->ctx_paddr[i],
3180 	    		BUS_DMA_NOWAIT);
3181 
3182 			if (error) {
3183 				BCE_PRINTF("%s(%d): Could not map CTX DMA memory!\n",
3184 					__FILE__, __LINE__);
3185 				rc = ENOMEM;
3186 				goto bce_dma_alloc_exit;
3187 			}
3188 
3189 			DBPRINT(sc, BCE_INFO, "%s(): ctx_paddr[%d] = 0x%jX\n",
3190 				__FUNCTION__, i, (uintmax_t) sc->ctx_paddr[i]);
3191 		}
3192 	}
3193 
3194 	/*
3195 	 * Create a DMA tag for the TX buffer descriptor chain,
3196 	 * allocate the memory, and fetch the physical
3197 	 * address of each chain page.
3198 	 */
3199 	if (bus_dma_tag_create(sc->parent_tag,
3200 			BCM_PAGE_SIZE,
3201 		    BCE_DMA_BOUNDARY,
3202 			sc->max_bus_addr,
3203 			BUS_SPACE_MAXADDR,
3204 			NULL, NULL,
3205 			BCE_TX_CHAIN_PAGE_SZ,
3206 			1,
3207 			BCE_TX_CHAIN_PAGE_SZ,
3208 			0,
3209 			NULL, NULL,
3210 			&sc->tx_bd_chain_tag)) {
3211 		BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
3212 			__FILE__, __LINE__);
3213 		rc = ENOMEM;
3214 		goto bce_dma_alloc_exit;
3215 	}
3216 
3217 	for (i = 0; i < TX_PAGES; i++) {
3218 
3219 		if (bus_dmamem_alloc(sc->tx_bd_chain_tag,
3220 	    		(void **)&sc->tx_bd_chain[i],
3221 	    		BUS_DMA_NOWAIT,
3222 		    	&sc->tx_bd_chain_map[i])) {
3223 			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3224 				"chain DMA memory!\n", __FILE__, __LINE__);
3225 			rc = ENOMEM;
3226 			goto bce_dma_alloc_exit;
3227 		}
3228 
3229 		error = bus_dmamap_load(sc->tx_bd_chain_tag,
3230 	    		sc->tx_bd_chain_map[i],
3231 	    		sc->tx_bd_chain[i],
3232 		    	BCE_TX_CHAIN_PAGE_SZ,
3233 		    	bce_dma_map_addr,
3234 	    		&sc->tx_bd_chain_paddr[i],
3235 	    		BUS_DMA_NOWAIT);
3236 
3237 		if (error) {
3238 			BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
3239 				__FILE__, __LINE__);
3240 			rc = ENOMEM;
3241 			goto bce_dma_alloc_exit;
3242 		}
3243 
3244 		DBPRINT(sc, BCE_INFO, "%s(): tx_bd_chain_paddr[%d] = 0x%jX\n",
3245 			__FUNCTION__, i, (uintmax_t) sc->tx_bd_chain_paddr[i]);
3246 	}
3247 
3248 	/* Check the required size before mapping to conserve resources. */
3249 	if (bce_tso_enable) {
3250 		max_size     = BCE_TSO_MAX_SIZE;
3251 		max_segments = BCE_MAX_SEGMENTS;
3252 		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3253 	} else {
3254 		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
3255 		max_segments = BCE_MAX_SEGMENTS;
3256 		max_seg_size = MCLBYTES;
3257 	}
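
	/*
	 * With TSO enabled the tag must span an entire TSO frame;
	 * otherwise a transmit is limited to BCE_MAX_SEGMENTS clusters of
	 * MCLBYTES each, which is what the sizing above reflects.
	 */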
3258 
3259 	/* Create a DMA tag for TX mbufs. */
3260 	if (bus_dma_tag_create(sc->parent_tag,
3261 			1,
3262 			BCE_DMA_BOUNDARY,
3263 			sc->max_bus_addr,
3264 			BUS_SPACE_MAXADDR,
3265 			NULL, NULL,
3266 			max_size,
3267 			max_segments,
3268 			max_seg_size,
3269 			0,
3270 			NULL, NULL,
3271 			&sc->tx_mbuf_tag)) {
3272 		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3273 			__FILE__, __LINE__);
3274 		rc = ENOMEM;
3275 		goto bce_dma_alloc_exit;
3276 	}
3277 
3278 	/* Create DMA maps for the TX mbuf clusters. */
3279 	for (i = 0; i < TOTAL_TX_BD; i++) {
3280 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3281 			&sc->tx_mbuf_map[i])) {
3282 			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
3283 				__FILE__, __LINE__);
3284 			rc = ENOMEM;
3285 			goto bce_dma_alloc_exit;
3286 		}
3287 	}
3288 
3289 	/*
3290 	 * Create a DMA tag for the RX buffer descriptor chain,
3291 	 * allocate and clear the memory, and fetch the physical
3292 	 * address of the blocks.
3293 	 */
3294 	if (bus_dma_tag_create(sc->parent_tag,
3295 			BCM_PAGE_SIZE,
3296 			BCE_DMA_BOUNDARY,
3297 			sc->max_bus_addr,
3298 			BUS_SPACE_MAXADDR,
3299 			NULL, NULL,
3300 			BCE_RX_CHAIN_PAGE_SZ,
3301 			1,
3302 			BCE_RX_CHAIN_PAGE_SZ,
3303 			0,
3304 			NULL, NULL,
3305 			&sc->rx_bd_chain_tag)) {
3306 		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
3307 			__FILE__, __LINE__);
3308 		rc = ENOMEM;
3309 		goto bce_dma_alloc_exit;
3310 	}
3311 
3312 	for (i = 0; i < RX_PAGES; i++) {
3313 
3314 		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3315 	    		(void **)&sc->rx_bd_chain[i],
3316 	    		BUS_DMA_NOWAIT,
3317 		    	&sc->rx_bd_chain_map[i])) {
3318 			BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3319 				"DMA memory!\n", __FILE__, __LINE__);
3320 			rc = ENOMEM;
3321 			goto bce_dma_alloc_exit;
3322 		}
3323 
3324 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3325 
3326 		error = bus_dmamap_load(sc->rx_bd_chain_tag,
3327 	    		sc->rx_bd_chain_map[i],
3328 	    		sc->rx_bd_chain[i],
3329 		    	BCE_RX_CHAIN_PAGE_SZ,
3330 		    	bce_dma_map_addr,
3331 	    		&sc->rx_bd_chain_paddr[i],
3332 	    		BUS_DMA_NOWAIT);
3333 
3334 		if (error) {
3335 			BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
3336 				__FILE__, __LINE__);
3337 			rc = ENOMEM;
3338 			goto bce_dma_alloc_exit;
3339 		}
3340 
3341 		DBPRINT(sc, BCE_INFO, "%s(): rx_bd_chain_paddr[%d] = 0x%jX\n",
3342 			__FUNCTION__, i, (uintmax_t) sc->rx_bd_chain_paddr[i]);
3343 	}
3344 
3345 	/*
3346 	 * Create a DMA tag for RX mbufs.
3347 	 */
3348 #ifdef BCE_JUMBO_HDRSPLIT
3349 	max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3350 		MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3351 #else
3352 	max_size = max_seg_size = MJUM9BYTES;
3353 #endif
3354 	max_segments = 1;
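
	/*
	 * Each RX buffer descriptor is filled from a single mbuf cluster,
	 * so the RX mbuf tag never needs more than one DMA segment.
	 */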
3355 
3356 	DBPRINT(sc, BCE_INFO, "%s(): Creating rx_mbuf_tag (max size = 0x%jX "
3357 		"max segments = %d, max segment size = 0x%jX)\n", __FUNCTION__,
3358 		(uintmax_t) max_size, max_segments, (uintmax_t) max_seg_size);
3359 
3360 	if (bus_dma_tag_create(sc->parent_tag,
3361 			1,
3362 			BCE_DMA_BOUNDARY,
3363 			sc->max_bus_addr,
3364 			BUS_SPACE_MAXADDR,
3365 			NULL, NULL,
3366 			max_size,
3367 			max_segments,
3368 			max_seg_size,
3369 			0,
3370 			NULL, NULL,
3371 	    	&sc->rx_mbuf_tag)) {
3372 		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3373 			__FILE__, __LINE__);
3374 		rc = ENOMEM;
3375 		goto bce_dma_alloc_exit;
3376 	}
3377 
3378 	/* Create DMA maps for the RX mbuf clusters. */
3379 	for (i = 0; i < TOTAL_RX_BD; i++) {
3380 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3381 				&sc->rx_mbuf_map[i])) {
3382 			BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
3383 				__FILE__, __LINE__);
3384 			rc = ENOMEM;
3385 			goto bce_dma_alloc_exit;
3386 		}
3387 	}
3388 
3389 #ifdef BCE_JUMBO_HDRSPLIT
3390 	/*
3391 	 * Create a DMA tag for the page buffer descriptor chain,
3392 	 * allocate and clear the memory, and fetch the physical
3393 	 * address of the blocks.
3394 	 */
3395 	if (bus_dma_tag_create(sc->parent_tag,
3396 			BCM_PAGE_SIZE,
3397 			BCE_DMA_BOUNDARY,
3398 			sc->max_bus_addr,
3399 			BUS_SPACE_MAXADDR,
3400 			NULL, NULL,
3401 			BCE_PG_CHAIN_PAGE_SZ,
3402 			1,
3403 			BCE_PG_CHAIN_PAGE_SZ,
3404 			0,
3405 			NULL, NULL,
3406 			&sc->pg_bd_chain_tag)) {
3407 		BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
3408 			__FILE__, __LINE__);
3409 		rc = ENOMEM;
3410 		goto bce_dma_alloc_exit;
3411 	}
3412 
3413 	for (i = 0; i < PG_PAGES; i++) {
3414 
3415 		if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3416 	    		(void **)&sc->pg_bd_chain[i],
3417 	    		BUS_DMA_NOWAIT,
3418 		    	&sc->pg_bd_chain_map[i])) {
3419 			BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
3420 				"DMA memory!\n", __FILE__, __LINE__);
3421 			rc = ENOMEM;
3422 			goto bce_dma_alloc_exit;
3423 		}
3424 
3425 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
3426 
3427 		error = bus_dmamap_load(sc->pg_bd_chain_tag,
3428 	    		sc->pg_bd_chain_map[i],
3429 	    		sc->pg_bd_chain[i],
3430 		    	BCE_PG_CHAIN_PAGE_SZ,
3431 		    	bce_dma_map_addr,
3432 	    		&sc->pg_bd_chain_paddr[i],
3433 	    		BUS_DMA_NOWAIT);
3434 
3435 		if (error) {
3436 			BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
3437 				__FILE__, __LINE__);
3438 			rc = ENOMEM;
3439 			goto bce_dma_alloc_exit;
3440 		}
3441 
3442 		DBPRINT(sc, BCE_INFO, "%s(): pg_bd_chain_paddr[%d] = 0x%jX\n",
3443 			__FUNCTION__, i, (uintmax_t) sc->pg_bd_chain_paddr[i]);
3444 	}
3445 
3446 	/*
3447 	 * Create a DMA tag for page mbufs.
3448 	 */
3449 	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
3450 		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
3451 
3452 	if (bus_dma_tag_create(sc->parent_tag,
3453 			1,
3454 			BCE_DMA_BOUNDARY,
3455 			sc->max_bus_addr,
3456 			BUS_SPACE_MAXADDR,
3457 			NULL, NULL,
3458 			max_size,
3459 			1,
3460 			max_seg_size,
3461 			0,
3462 			NULL, NULL,
3463 	    	&sc->pg_mbuf_tag)) {
3464 		BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
3465 			__FILE__, __LINE__);
3466 		rc = ENOMEM;
3467 		goto bce_dma_alloc_exit;
3468 	}
3469 
3470 	/* Create DMA maps for the page mbuf clusters. */
3471 	for (i = 0; i < TOTAL_PG_BD; i++) {
3472 		if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3473 				&sc->pg_mbuf_map[i])) {
3474 			BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
3475 				__FILE__, __LINE__);
3476 			rc = ENOMEM;
3477 			goto bce_dma_alloc_exit;
3478 		}
3479 	}
3480 #endif
3481 
3482 bce_dma_alloc_exit:
3483 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3484 	return(rc);
3485 }
3486 
3487 
3488 /****************************************************************************/
3489 /* Release all resources used by the driver.                                */
3490 /*                                                                          */
3491 /* Releases all resources acquired by the driver including interrupts,      */
3492 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
3493 /*                                                                          */
3494 /* Returns:                                                                 */
3495 /*   Nothing.                                                               */
3496 /****************************************************************************/
3497 static void
3498 bce_release_resources(struct bce_softc *sc)
3499 {
3500 	device_t dev;
3501 
3502 	DBENTER(BCE_VERBOSE_RESET);
3503 
3504 	dev = sc->bce_dev;
3505 
3506 	bce_dma_free(sc);
3507 
3508 	if (sc->bce_intrhand != NULL) {
3509 		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3510 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3511 	}
3512 
3513 	if (sc->bce_res_irq != NULL) {
3514 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3515 		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
3516 			sc->bce_res_irq);
3517 	}
3518 
3519 	if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3520 		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3521 		pci_release_msi(dev);
3522 	}
3523 
3524 	if (sc->bce_res_mem != NULL) {
3525 		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
3526 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
3527 	}
3528 
3529 	if (sc->bce_ifp != NULL) {
3530 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3531 		if_free(sc->bce_ifp);
3532 	}
3533 
3534 	if (mtx_initialized(&sc->bce_mtx))
3535 		BCE_LOCK_DESTROY(sc);
3536 
3537 	DBEXIT(BCE_VERBOSE_RESET);
3538 }
3539 
3540 
3541 /****************************************************************************/
3542 /* Firmware synchronization.                                                */
3543 /*                                                                          */
3544 /* Before performing certain events such as a chip reset, synchronize with  */
3545 /* the firmware first.                                                      */
3546 /*                                                                          */
3547 /* Returns:                                                                 */
3548 /*   0 for success, positive value for failure.                             */
3549 /****************************************************************************/
3550 static int
3551 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3552 {
3553 	int i, rc = 0;
3554 	u32 val;
3555 
3556 	DBENTER(BCE_VERBOSE_RESET);
3557 
3558 	/* Don't waste any time if we've timed out before. */
3559 	if (sc->bce_fw_timed_out) {
3560 		rc = EBUSY;
3561 		goto bce_fw_sync_exit;
3562 	}
3563 
3564 	/* Increment the message sequence number. */
3565 	sc->bce_fw_wr_seq++;
3566 	msg_data |= sc->bce_fw_wr_seq;
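
	/*
	 * The sequence number occupies the low-order bits covered by
	 * BCE_DRV_MSG_SEQ; the bootcode echoes it back through BCE_FW_MB,
	 * which is what the acknowledgement loop below checks for.
	 */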
3567 
3568  	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n",
3569  		msg_data);
3570 
3571 	/* Send the message to the bootcode driver mailbox. */
3572 	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3573 
3574 	/* Wait for the bootcode to acknowledge the message. */
3575 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3576 		/* Check for a response in the bootcode firmware mailbox. */
3577 		val = bce_shmem_rd(sc, BCE_FW_MB);
3578 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3579 			break;
3580 		DELAY(1000);
3581 	}
3582 
3583 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
3584 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3585 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3586 
3587 		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3588 			"msg_data = 0x%08X\n",
3589 			__FILE__, __LINE__, msg_data);
3590 
3591 		msg_data &= ~BCE_DRV_MSG_CODE;
3592 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3593 
3594 		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3595 
3596 		sc->bce_fw_timed_out = 1;
3597 		rc = EBUSY;
3598 	}
3599 
3600 bce_fw_sync_exit:
3601 	DBEXIT(BCE_VERBOSE_RESET);
3602 	return (rc);
3603 }
3604 
3605 
3606 /****************************************************************************/
3607 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
3608 /*                                                                          */
3609 /* Returns:                                                                 */
3610 /*   Nothing.                                                               */
3611 /****************************************************************************/
3612 static void
3613 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
3614 	u32 rv2p_code_len, u32 rv2p_proc)
3615 {
3616 	int i;
3617 	u32 val;
3618 
3619 	DBENTER(BCE_VERBOSE_RESET);
3620 
3621 	/* Set the page size used by RV2P. */
3622 	if (rv2p_proc == RV2P_PROC2) {
3623 		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3624 	}
3625 
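	/*
	 * Each 64-bit RV2P instruction is written as a high/low register
	 * pair and then committed to the processor's instruction memory at
	 * index i / 8 through the appropriate ADDR_CMD register.
	 */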
3626 	for (i = 0; i < rv2p_code_len; i += 8) {
3627 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3628 		rv2p_code++;
3629 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3630 		rv2p_code++;
3631 
3632 		if (rv2p_proc == RV2P_PROC1) {
3633 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3634 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3635 		}
3636 		else {
3637 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3638 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3639 		}
3640 	}
3641 
3642 	/* Reset the processor, un-stall is done later. */
3643 	if (rv2p_proc == RV2P_PROC1) {
3644 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3645 	}
3646 	else {
3647 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3648 	}
3649 
3650 	DBEXIT(BCE_VERBOSE_RESET);
3651 }
3652 
3653 
3654 /****************************************************************************/
3655 /* Load RISC processor firmware.                                            */
3656 /*                                                                          */
3657 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
3658 /* associated with a particular processor.                                  */
3659 /*                                                                          */
3660 /* Returns:                                                                 */
3661 /*   Nothing.                                                               */
3662 /****************************************************************************/
3663 static void
3664 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3665 	struct fw_info *fw)
3666 {
3667 	u32 offset;
3668 	u32 val;
3669 
3670 	DBENTER(BCE_VERBOSE_RESET);
3671 
3672 	/* Halt the CPU. */
3673 	val = REG_RD_IND(sc, cpu_reg->mode);
3674 	val |= cpu_reg->mode_value_halt;
3675 	REG_WR_IND(sc, cpu_reg->mode, val);
3676 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3677 
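	/*
	 * The section addresses in the firmware image are MIPS virtual
	 * addresses; subtracting mips_view_base and adding spad_base turns
	 * each one into an offset within this CPU's scratchpad, which is
	 * where the sections below are written one word at a time.
	 */
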
3678 	/* Load the Text area. */
3679 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3680 	if (fw->text) {
3681 		int j;
3682 
3683 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3684 			REG_WR_IND(sc, offset, fw->text[j]);
3685 		}
3686 	}
3687 
3688 	/* Load the Data area. */
3689 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3690 	if (fw->data) {
3691 		int j;
3692 
3693 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3694 			REG_WR_IND(sc, offset, fw->data[j]);
3695 		}
3696 	}
3697 
3698 	/* Load the SBSS area. */
3699 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3700 	if (fw->sbss) {
3701 		int j;
3702 
3703 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3704 			REG_WR_IND(sc, offset, fw->sbss[j]);
3705 		}
3706 	}
3707 
3708 	/* Load the BSS area. */
3709 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3710 	if (fw->bss) {
3711 		int j;
3712 
3713 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3714 			REG_WR_IND(sc, offset, fw->bss[j]);
3715 		}
3716 	}
3717 
3718 	/* Load the Read-Only area. */
3719 	offset = cpu_reg->spad_base +
3720 		(fw->rodata_addr - cpu_reg->mips_view_base);
3721 	if (fw->rodata) {
3722 		int j;
3723 
3724 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3725 			REG_WR_IND(sc, offset, fw->rodata[j]);
3726 		}
3727 	}
3728 
3729 	/* Clear the pre-fetch instruction. */
3730 	REG_WR_IND(sc, cpu_reg->inst, 0);
3731 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3732 
3733 	/* Start the CPU. */
3734 	val = REG_RD_IND(sc, cpu_reg->mode);
3735 	val &= ~cpu_reg->mode_value_halt;
3736 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3737 	REG_WR_IND(sc, cpu_reg->mode, val);
3738 
3739 	DBEXIT(BCE_VERBOSE_RESET);
3740 }
3741 
3742 
3743 /****************************************************************************/
3744 /* Initialize the RX CPU.                                                   */
3745 /*                                                                          */
3746 /* Returns:                                                                 */
3747 /*   Nothing.                                                               */
3748 /****************************************************************************/
3749 static void
3750 bce_init_rxp_cpu(struct bce_softc *sc)
3751 {
3752 	struct cpu_reg cpu_reg;
3753 	struct fw_info fw;
3754 
3755 	DBENTER(BCE_VERBOSE_RESET);
3756 
3757 	cpu_reg.mode = BCE_RXP_CPU_MODE;
3758 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3759 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3760 	cpu_reg.state = BCE_RXP_CPU_STATE;
3761 	cpu_reg.state_value_clear = 0xffffff;
3762 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3763 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3764 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3765 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3766 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3767 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
3768 	cpu_reg.mips_view_base = 0x8000000;
3769 
3770 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3771 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3772  		fw.ver_major = bce_RXP_b09FwReleaseMajor;
3773 		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
3774 		fw.ver_fix = bce_RXP_b09FwReleaseFix;
3775 		fw.start_addr = bce_RXP_b09FwStartAddr;
3776 
3777 		fw.text_addr = bce_RXP_b09FwTextAddr;
3778 		fw.text_len = bce_RXP_b09FwTextLen;
3779 		fw.text_index = 0;
3780 		fw.text = bce_RXP_b09FwText;
3781 
3782 		fw.data_addr = bce_RXP_b09FwDataAddr;
3783 		fw.data_len = bce_RXP_b09FwDataLen;
3784 		fw.data_index = 0;
3785 		fw.data = bce_RXP_b09FwData;
3786 
3787 		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
3788 		fw.sbss_len = bce_RXP_b09FwSbssLen;
3789 		fw.sbss_index = 0;
3790 		fw.sbss = bce_RXP_b09FwSbss;
3791 
3792 		fw.bss_addr = bce_RXP_b09FwBssAddr;
3793 		fw.bss_len = bce_RXP_b09FwBssLen;
3794 		fw.bss_index = 0;
3795 		fw.bss = bce_RXP_b09FwBss;
3796 
3797 		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
3798 		fw.rodata_len = bce_RXP_b09FwRodataLen;
3799 		fw.rodata_index = 0;
3800 		fw.rodata = bce_RXP_b09FwRodata;
3801 	} else {
3802 		fw.ver_major = bce_RXP_b06FwReleaseMajor;
3803 		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3804 		fw.ver_fix = bce_RXP_b06FwReleaseFix;
3805 		fw.start_addr = bce_RXP_b06FwStartAddr;
3806 
3807 		fw.text_addr = bce_RXP_b06FwTextAddr;
3808 		fw.text_len = bce_RXP_b06FwTextLen;
3809 		fw.text_index = 0;
3810 		fw.text = bce_RXP_b06FwText;
3811 
3812 		fw.data_addr = bce_RXP_b06FwDataAddr;
3813 		fw.data_len = bce_RXP_b06FwDataLen;
3814 		fw.data_index = 0;
3815 		fw.data = bce_RXP_b06FwData;
3816 
3817 		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3818 		fw.sbss_len = bce_RXP_b06FwSbssLen;
3819 		fw.sbss_index = 0;
3820 		fw.sbss = bce_RXP_b06FwSbss;
3821 
3822 		fw.bss_addr = bce_RXP_b06FwBssAddr;
3823 		fw.bss_len = bce_RXP_b06FwBssLen;
3824 		fw.bss_index = 0;
3825 		fw.bss = bce_RXP_b06FwBss;
3826 
3827 		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3828 		fw.rodata_len = bce_RXP_b06FwRodataLen;
3829 		fw.rodata_index = 0;
3830 		fw.rodata = bce_RXP_b06FwRodata;
3831 	}
3832 
3833 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3834 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3835 
3836 	DBEXIT(BCE_VERBOSE_RESET);
3837 }
3838 
3839 
3840 /****************************************************************************/
3841 /* Initialize the TX CPU.                                                   */
3842 /*                                                                          */
3843 /* Returns:                                                                 */
3844 /*   Nothing.                                                               */
3845 /****************************************************************************/
3846 static void
3847 bce_init_txp_cpu(struct bce_softc *sc)
3848 {
3849 	struct cpu_reg cpu_reg;
3850 	struct fw_info fw;
3851 
3852 	DBENTER(BCE_VERBOSE_RESET);
3853 
3854 	cpu_reg.mode = BCE_TXP_CPU_MODE;
3855 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3856 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3857 	cpu_reg.state = BCE_TXP_CPU_STATE;
3858 	cpu_reg.state_value_clear = 0xffffff;
3859 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3860 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3861 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3862 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3863 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3864 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
3865 	cpu_reg.mips_view_base = 0x8000000;
3866 
3867 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3868 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3869 		fw.ver_major = bce_TXP_b09FwReleaseMajor;
3870 		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
3871 		fw.ver_fix = bce_TXP_b09FwReleaseFix;
3872 		fw.start_addr = bce_TXP_b09FwStartAddr;
3873 
3874 		fw.text_addr = bce_TXP_b09FwTextAddr;
3875 		fw.text_len = bce_TXP_b09FwTextLen;
3876 		fw.text_index = 0;
3877 		fw.text = bce_TXP_b09FwText;
3878 
3879 		fw.data_addr = bce_TXP_b09FwDataAddr;
3880 		fw.data_len = bce_TXP_b09FwDataLen;
3881 		fw.data_index = 0;
3882 		fw.data = bce_TXP_b09FwData;
3883 
3884 		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
3885 		fw.sbss_len = bce_TXP_b09FwSbssLen;
3886 		fw.sbss_index = 0;
3887 		fw.sbss = bce_TXP_b09FwSbss;
3888 
3889 		fw.bss_addr = bce_TXP_b09FwBssAddr;
3890 		fw.bss_len = bce_TXP_b09FwBssLen;
3891 		fw.bss_index = 0;
3892 		fw.bss = bce_TXP_b09FwBss;
3893 
3894 		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
3895 		fw.rodata_len = bce_TXP_b09FwRodataLen;
3896 		fw.rodata_index = 0;
3897 		fw.rodata = bce_TXP_b09FwRodata;
3898 	} else {
3899 		fw.ver_major = bce_TXP_b06FwReleaseMajor;
3900 		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3901 		fw.ver_fix = bce_TXP_b06FwReleaseFix;
3902 		fw.start_addr = bce_TXP_b06FwStartAddr;
3903 
3904 		fw.text_addr = bce_TXP_b06FwTextAddr;
3905 		fw.text_len = bce_TXP_b06FwTextLen;
3906 		fw.text_index = 0;
3907 		fw.text = bce_TXP_b06FwText;
3908 
3909 		fw.data_addr = bce_TXP_b06FwDataAddr;
3910 		fw.data_len = bce_TXP_b06FwDataLen;
3911 		fw.data_index = 0;
3912 		fw.data = bce_TXP_b06FwData;
3913 
3914 		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3915 		fw.sbss_len = bce_TXP_b06FwSbssLen;
3916 		fw.sbss_index = 0;
3917 		fw.sbss = bce_TXP_b06FwSbss;
3918 
3919 		fw.bss_addr = bce_TXP_b06FwBssAddr;
3920 		fw.bss_len = bce_TXP_b06FwBssLen;
3921 		fw.bss_index = 0;
3922 		fw.bss = bce_TXP_b06FwBss;
3923 
3924 		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
3925 		fw.rodata_len = bce_TXP_b06FwRodataLen;
3926 		fw.rodata_index = 0;
3927 		fw.rodata = bce_TXP_b06FwRodata;
3928 	}
3929 
3930 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
3931 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3932 
3933 	DBEXIT(BCE_VERBOSE_RESET);
3934 }
3935 
3936 
3937 /****************************************************************************/
3938 /* Initialize the TPAT CPU.                                                 */
3939 /*                                                                          */
3940 /* Returns:                                                                 */
3941 /*   Nothing.                                                               */
3942 /****************************************************************************/
3943 static void
3944 bce_init_tpat_cpu(struct bce_softc *sc)
3945 {
3946 	struct cpu_reg cpu_reg;
3947 	struct fw_info fw;
3948 
3949 	DBENTER(BCE_VERBOSE_RESET);
3950 
3951 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
3952 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3953 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3954 	cpu_reg.state = BCE_TPAT_CPU_STATE;
3955 	cpu_reg.state_value_clear = 0xffffff;
3956 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3957 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3958 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3959 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3960 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3961 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3962 	cpu_reg.mips_view_base = 0x8000000;
3963 
3964 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3965 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3966 		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3967 		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3968 		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3969 		fw.start_addr = bce_TPAT_b09FwStartAddr;
3970 
3971 		fw.text_addr = bce_TPAT_b09FwTextAddr;
3972 		fw.text_len = bce_TPAT_b09FwTextLen;
3973 		fw.text_index = 0;
3974 		fw.text = bce_TPAT_b09FwText;
3975 
3976 		fw.data_addr = bce_TPAT_b09FwDataAddr;
3977 		fw.data_len = bce_TPAT_b09FwDataLen;
3978 		fw.data_index = 0;
3979 		fw.data = bce_TPAT_b09FwData;
3980 
3981 		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3982 		fw.sbss_len = bce_TPAT_b09FwSbssLen;
3983 		fw.sbss_index = 0;
3984 		fw.sbss = bce_TPAT_b09FwSbss;
3985 
3986 		fw.bss_addr = bce_TPAT_b09FwBssAddr;
3987 		fw.bss_len = bce_TPAT_b09FwBssLen;
3988 		fw.bss_index = 0;
3989 		fw.bss = bce_TPAT_b09FwBss;
3990 
3991 		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3992 		fw.rodata_len = bce_TPAT_b09FwRodataLen;
3993 		fw.rodata_index = 0;
3994 		fw.rodata = bce_TPAT_b09FwRodata;
3995 	} else {
3996 		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3997 		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3998 		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3999 		fw.start_addr = bce_TPAT_b06FwStartAddr;
4000 
4001 		fw.text_addr = bce_TPAT_b06FwTextAddr;
4002 		fw.text_len = bce_TPAT_b06FwTextLen;
4003 		fw.text_index = 0;
4004 		fw.text = bce_TPAT_b06FwText;
4005 
4006 		fw.data_addr = bce_TPAT_b06FwDataAddr;
4007 		fw.data_len = bce_TPAT_b06FwDataLen;
4008 		fw.data_index = 0;
4009 		fw.data = bce_TPAT_b06FwData;
4010 
4011 		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
4012 		fw.sbss_len = bce_TPAT_b06FwSbssLen;
4013 		fw.sbss_index = 0;
4014 		fw.sbss = bce_TPAT_b06FwSbss;
4015 
4016 		fw.bss_addr = bce_TPAT_b06FwBssAddr;
4017 		fw.bss_len = bce_TPAT_b06FwBssLen;
4018 		fw.bss_index = 0;
4019 		fw.bss = bce_TPAT_b06FwBss;
4020 
4021 		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
4022 		fw.rodata_len = bce_TPAT_b06FwRodataLen;
4023 		fw.rodata_index = 0;
4024 		fw.rodata = bce_TPAT_b06FwRodata;
4025 	}
4026 
4027 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
4028 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4029 
4030 	DBEXIT(BCE_VERBOSE_RESET);
4031 }
4032 
4033 
4034 /****************************************************************************/
4035 /* Initialize the CP CPU.                                                   */
4036 /*                                                                          */
4037 /* Returns:                                                                 */
4038 /*   Nothing.                                                               */
4039 /****************************************************************************/
4040 static void
4041 bce_init_cp_cpu(struct bce_softc *sc)
4042 {
4043 	struct cpu_reg cpu_reg;
4044 	struct fw_info fw;
4045 
4046 	DBENTER(BCE_VERBOSE_RESET);
4047 
4048 	cpu_reg.mode = BCE_CP_CPU_MODE;
4049 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
4050 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
4051 	cpu_reg.state = BCE_CP_CPU_STATE;
4052 	cpu_reg.state_value_clear = 0xffffff;
4053 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
4054 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
4055 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
4056 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
4057 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
4058 	cpu_reg.spad_base = BCE_CP_SCRATCH;
4059 	cpu_reg.mips_view_base = 0x8000000;
4060 
4061 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4062 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4063 		fw.ver_major = bce_CP_b09FwReleaseMajor;
4064 		fw.ver_minor = bce_CP_b09FwReleaseMinor;
4065 		fw.ver_fix = bce_CP_b09FwReleaseFix;
4066 		fw.start_addr = bce_CP_b09FwStartAddr;
4067 
4068 		fw.text_addr = bce_CP_b09FwTextAddr;
4069 		fw.text_len = bce_CP_b09FwTextLen;
4070 		fw.text_index = 0;
4071 		fw.text = bce_CP_b09FwText;
4072 
4073 		fw.data_addr = bce_CP_b09FwDataAddr;
4074 		fw.data_len = bce_CP_b09FwDataLen;
4075 		fw.data_index = 0;
4076 		fw.data = bce_CP_b09FwData;
4077 
4078 		fw.sbss_addr = bce_CP_b09FwSbssAddr;
4079 		fw.sbss_len = bce_CP_b09FwSbssLen;
4080 		fw.sbss_index = 0;
4081 		fw.sbss = bce_CP_b09FwSbss;
4082 
4083 		fw.bss_addr = bce_CP_b09FwBssAddr;
4084 		fw.bss_len = bce_CP_b09FwBssLen;
4085 		fw.bss_index = 0;
4086 		fw.bss = bce_CP_b09FwBss;
4087 
4088 		fw.rodata_addr = bce_CP_b09FwRodataAddr;
4089 		fw.rodata_len = bce_CP_b09FwRodataLen;
4090 		fw.rodata_index = 0;
4091 		fw.rodata = bce_CP_b09FwRodata;
4092 	} else {
4093 		fw.ver_major = bce_CP_b06FwReleaseMajor;
4094 		fw.ver_minor = bce_CP_b06FwReleaseMinor;
4095 		fw.ver_fix = bce_CP_b06FwReleaseFix;
4096 		fw.start_addr = bce_CP_b06FwStartAddr;
4097 
4098 		fw.text_addr = bce_CP_b06FwTextAddr;
4099 		fw.text_len = bce_CP_b06FwTextLen;
4100 		fw.text_index = 0;
4101 		fw.text = bce_CP_b06FwText;
4102 
4103 		fw.data_addr = bce_CP_b06FwDataAddr;
4104 		fw.data_len = bce_CP_b06FwDataLen;
4105 		fw.data_index = 0;
4106 		fw.data = bce_CP_b06FwData;
4107 
4108 		fw.sbss_addr = bce_CP_b06FwSbssAddr;
4109 		fw.sbss_len = bce_CP_b06FwSbssLen;
4110 		fw.sbss_index = 0;
4111 		fw.sbss = bce_CP_b06FwSbss;
4112 
4113 		fw.bss_addr = bce_CP_b06FwBssAddr;
4114 		fw.bss_len = bce_CP_b06FwBssLen;
4115 		fw.bss_index = 0;
4116 		fw.bss = bce_CP_b06FwBss;
4117 
4118 		fw.rodata_addr = bce_CP_b06FwRodataAddr;
4119 		fw.rodata_len = bce_CP_b06FwRodataLen;
4120 		fw.rodata_index = 0;
4121 		fw.rodata = bce_CP_b06FwRodata;
4122 	}
4123 
4124 	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4125 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4126 
4127 	DBEXIT(BCE_VERBOSE_RESET);
4128 }
4129 
4130 
4131 /****************************************************************************/
4132 /* Initialize the COM CPU.                                                  */
4133 /*                                                                          */
4134 /* Returns:                                                                 */
4135 /*   Nothing.                                                               */
4136 /****************************************************************************/
4137 static void
4138 bce_init_com_cpu(struct bce_softc *sc)
4139 {
4140 	struct cpu_reg cpu_reg;
4141 	struct fw_info fw;
4142 
4143 	DBENTER(BCE_VERBOSE_RESET);
4144 
4145 	cpu_reg.mode = BCE_COM_CPU_MODE;
4146 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4147 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4148 	cpu_reg.state = BCE_COM_CPU_STATE;
4149 	cpu_reg.state_value_clear = 0xffffff;
4150 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4151 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4152 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4153 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4154 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4155 	cpu_reg.spad_base = BCE_COM_SCRATCH;
4156 	cpu_reg.mips_view_base = 0x8000000;
4157 
4158 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4159 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4160 		fw.ver_major = bce_COM_b09FwReleaseMajor;
4161 		fw.ver_minor = bce_COM_b09FwReleaseMinor;
4162 		fw.ver_fix = bce_COM_b09FwReleaseFix;
4163 		fw.start_addr = bce_COM_b09FwStartAddr;
4164 
4165 		fw.text_addr = bce_COM_b09FwTextAddr;
4166 		fw.text_len = bce_COM_b09FwTextLen;
4167 		fw.text_index = 0;
4168 		fw.text = bce_COM_b09FwText;
4169 
4170 		fw.data_addr = bce_COM_b09FwDataAddr;
4171 		fw.data_len = bce_COM_b09FwDataLen;
4172 		fw.data_index = 0;
4173 		fw.data = bce_COM_b09FwData;
4174 
4175 		fw.sbss_addr = bce_COM_b09FwSbssAddr;
4176 		fw.sbss_len = bce_COM_b09FwSbssLen;
4177 		fw.sbss_index = 0;
4178 		fw.sbss = bce_COM_b09FwSbss;
4179 
4180 		fw.bss_addr = bce_COM_b09FwBssAddr;
4181 		fw.bss_len = bce_COM_b09FwBssLen;
4182 		fw.bss_index = 0;
4183 		fw.bss = bce_COM_b09FwBss;
4184 
4185 		fw.rodata_addr = bce_COM_b09FwRodataAddr;
4186 		fw.rodata_len = bce_COM_b09FwRodataLen;
4187 		fw.rodata_index = 0;
4188 		fw.rodata = bce_COM_b09FwRodata;
4189 	} else {
4190 		fw.ver_major = bce_COM_b06FwReleaseMajor;
4191 		fw.ver_minor = bce_COM_b06FwReleaseMinor;
4192 		fw.ver_fix = bce_COM_b06FwReleaseFix;
4193 		fw.start_addr = bce_COM_b06FwStartAddr;
4194 
4195 		fw.text_addr = bce_COM_b06FwTextAddr;
4196 		fw.text_len = bce_COM_b06FwTextLen;
4197 		fw.text_index = 0;
4198 		fw.text = bce_COM_b06FwText;
4199 
4200 		fw.data_addr = bce_COM_b06FwDataAddr;
4201 		fw.data_len = bce_COM_b06FwDataLen;
4202 		fw.data_index = 0;
4203 		fw.data = bce_COM_b06FwData;
4204 
4205 		fw.sbss_addr = bce_COM_b06FwSbssAddr;
4206 		fw.sbss_len = bce_COM_b06FwSbssLen;
4207 		fw.sbss_index = 0;
4208 		fw.sbss = bce_COM_b06FwSbss;
4209 
4210 		fw.bss_addr = bce_COM_b06FwBssAddr;
4211 		fw.bss_len = bce_COM_b06FwBssLen;
4212 		fw.bss_index = 0;
4213 		fw.bss = bce_COM_b06FwBss;
4214 
4215 		fw.rodata_addr = bce_COM_b06FwRodataAddr;
4216 		fw.rodata_len = bce_COM_b06FwRodataLen;
4217 		fw.rodata_index = 0;
4218 		fw.rodata = bce_COM_b06FwRodata;
4219 	}
4220 
4221 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4222 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4223 
4224 	DBEXIT(BCE_VERBOSE_RESET);
4225 }
4226 
4227 
4228 /****************************************************************************/
4229 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
4230 /*                                                                          */
4231 /* Loads the firmware for each CPU and starts the CPU.                      */
4232 /*                                                                          */
4233 /* Returns:                                                                 */
4234 /*   Nothing.                                                               */
4235 /****************************************************************************/
4236 static void
4237 bce_init_cpus(struct bce_softc *sc)
4238 {
4239 	DBENTER(BCE_VERBOSE_RESET);
4240 
4241 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4242 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4243 
4244 		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
4245 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
4246 				sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
4247 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
4248 				sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
4249 		} else {
4250 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
4251 				sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
4252 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
4253 				sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
4254 		}
4255 
4256 	} else {
4257 		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
4258 			sizeof(bce_rv2p_proc1),	RV2P_PROC1);
4259 		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
4260 			sizeof(bce_rv2p_proc2),	RV2P_PROC2);
4261 	}
4262 
4263 	bce_init_rxp_cpu(sc);
4264 	bce_init_txp_cpu(sc);
4265 	bce_init_tpat_cpu(sc);
4266 	bce_init_com_cpu(sc);
4267 	bce_init_cp_cpu(sc);
4268 
4269 	DBEXIT(BCE_VERBOSE_RESET);
4270 }
4271 
4272 
4273 /****************************************************************************/
4274 /* Initialize context memory.                                               */
4275 /*                                                                          */
4276 /* Clears the memory associated with each Context ID (CID).                 */
4277 /*                                                                          */
4278 /* Returns:                                                                 */
4279 /*   Nothing.                                                               */
4280 /****************************************************************************/
4281 static void
4282 bce_init_ctx(struct bce_softc *sc)
4283 {
4284 
4285 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4286 
4287 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4288 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4289 		int i, retry_cnt = CTX_INIT_RETRY_COUNT;
4290 		u32 val;
4291 
4292 		DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4293 
4294 		/*
4295 		 * BCM5709 context memory may be cached
4296 		 * in host memory so prepare the host memory
4297 		 * for access.
4298 		 */
4299 		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
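		/*
		 * (BCM_PAGE_BITS - 8) encodes the host page size as a power
		 * of two relative to 256 bytes, e.g. a value of 4 selects
		 * 4KiB pages.
		 */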
4300 		val |= (BCM_PAGE_BITS - 8) << 16;
4301 		REG_WR(sc, BCE_CTX_COMMAND, val);
4302 
4303 		/* Wait for mem init command to complete. */
4304 		for (i = 0; i < retry_cnt; i++) {
4305 			val = REG_RD(sc, BCE_CTX_COMMAND);
4306 			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4307 				break;
4308 			DELAY(2);
4309 		}
4310 
4311 		/* ToDo: Consider returning an error here. */
4312 		DBRUNIF((val & BCE_CTX_COMMAND_MEM_INIT),
4313 			BCE_PRINTF("%s(): Context memory initialization failed!\n",
4314 			__FUNCTION__));
4315 
4316 		for (i = 0; i < sc->ctx_pages; i++) {
4317 			int j;
4318 
4319 			/* Set the physical address of the context memory cache. */
4320 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4321 				BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4322 				BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4323 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4324 				BCE_ADDR_HI(sc->ctx_paddr[i]));
4325 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4326 				BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4327 
4328 			/* Verify that the context memory write was successful. */
4329 			for (j = 0; j < retry_cnt; j++) {
4330 				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4331 				if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4332 					break;
4333 				DELAY(5);
4334 			}
4335 
4336 			/* ToDo: Consider returning an error here. */
4337 			DBRUNIF((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ),
4338 				BCE_PRINTF("%s(): Failed to initialize context page %d!\n",
4339 				__FUNCTION__, i));
4340 		}
4341 	} else {
4342 		u32 vcid_addr, offset;
4343 
4344 		DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4345 
4346 		/*
4347 		 * For the 5706/5708, context memory is local to
4348 		 * the controller, so initialize the controller
4349 		 * context memory.
4350 		 */
4351 
4352 		vcid_addr = GET_CID_ADDR(96);
4353 		while (vcid_addr) {
4354 
4355 			vcid_addr -= PHY_CTX_SIZE;
4356 
4357 			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4358 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4359 
4360 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4361 				CTX_WR(sc, 0x00, offset, 0);
4362 			}
4363 
4364 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4365 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4366 		}
4367 
4368 	}
4369 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4370 }
4371 
4372 
4373 /****************************************************************************/
4374 /* Fetch the permanent MAC address of the controller.                       */
4375 /*                                                                          */
4376 /* Returns:                                                                 */
4377 /*   Nothing.                                                               */
4378 /****************************************************************************/
4379 static void
4380 bce_get_mac_addr(struct bce_softc *sc)
4381 {
4382 	u32 mac_lo = 0, mac_hi = 0;
4383 
4384 	DBENTER(BCE_VERBOSE_RESET);
4385 	/*
4386 	 * The NetXtreme II bootcode populates various NIC
4387 	 * power-on and runtime configuration items in a
4388 	 * shared memory area.  The factory configured MAC
4389 	 * address is available from both NVRAM and the
4390 	 * shared memory area so we'll read the value from
4391 	 * shared memory for speed.
4392 	 */
4393 
4394 	mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
4395 	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
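
	/*
	 * The upper shared memory word holds MAC bytes 0 and 1 in its low
	 * 16 bits and the lower word holds bytes 2 through 5.  As an
	 * illustration, 00:10:18:aa:bb:cc reads back as
	 * mac_hi = 0x00000010 and mac_lo = 0x18aabbcc.
	 */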
4396 
4397 	if ((mac_lo == 0) && (mac_hi == 0)) {
4398 		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4399 			__FILE__, __LINE__);
4400 	} else {
4401 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
4402 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
4403 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
4404 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
4405 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
4406 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
4407 	}
4408 
4409 	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
4410 	DBEXIT(BCE_VERBOSE_RESET);
4411 }
4412 
4413 
4414 /****************************************************************************/
4415 /* Program the MAC address.                                                 */
4416 /*                                                                          */
4417 /* Returns:                                                                 */
4418 /*   Nothing.                                                               */
4419 /****************************************************************************/
4420 static void
4421 bce_set_mac_addr(struct bce_softc *sc)
4422 {
4423 	u32 val;
4424 	u8 *mac_addr = sc->eaddr;
4425 
4426 	/* ToDo: Add support for setting multiple MAC addresses. */
4427 
4428 	DBENTER(BCE_VERBOSE_RESET);
4429 	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
4430 
4431 	val = (mac_addr[0] << 8) | mac_addr[1];
4432 
4433 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4434 
4435 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4436 		(mac_addr[4] << 8) | mac_addr[5];
4437 
4438 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4439 
4440 	DBEXIT(BCE_VERBOSE_RESET);
4441 }
4442 
4443 
4444 /****************************************************************************/
4445 /* Stop the controller.                                                     */
4446 /*                                                                          */
4447 /* Returns:                                                                 */
4448 /*   Nothing.                                                               */
4449 /****************************************************************************/
4450 static void
4451 bce_stop(struct bce_softc *sc)
4452 {
4453 	struct ifnet *ifp;
4454 	struct ifmedia_entry *ifm;
4455 	struct mii_data *mii = NULL;
4456 	int mtmp, itmp;
4457 
4458 	DBENTER(BCE_VERBOSE_RESET);
4459 
4460 	BCE_LOCK_ASSERT(sc);
4461 
4462 	ifp = sc->bce_ifp;
4463 
4464 	mii = device_get_softc(sc->bce_miibus);
4465 
4466 	callout_stop(&sc->bce_tick_callout);
4467 
4468 	/* Disable the transmit/receive blocks. */
4469 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4470 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4471 	DELAY(20);
4472 
4473 	bce_disable_intr(sc);
4474 
4475 	/* Free RX buffers. */
4476 #ifdef BCE_JUMBO_HDRSPLIT
4477 	bce_free_pg_chain(sc);
4478 #endif
4479 	bce_free_rx_chain(sc);
4480 
4481 	/* Free TX buffers. */
4482 	bce_free_tx_chain(sc);
4483 
4484 	/*
4485 	 * Isolate/power down the PHY, but leave the media selection
4486 	 * unchanged so that things will be put back to normal when
4487 	 * we bring the interface back up.
4488 	 */
4489 
4490 	itmp = ifp->if_flags;
4491 	ifp->if_flags |= IFF_UP;
4492 
4493 	/* If we are called from bce_detach(), mii is already NULL. */
4494 	if (mii != NULL) {
4495 		ifm = mii->mii_media.ifm_cur;
4496 		mtmp = ifm->ifm_media;
4497 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
4498 		mii_mediachg(mii);
4499 		ifm->ifm_media = mtmp;
4500 	}
4501 
4502 	ifp->if_flags = itmp;
4503 	sc->watchdog_timer = 0;
4504 
4505 	sc->bce_link = 0;
4506 
4507 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4508 
4509 	DBEXIT(BCE_VERBOSE_RESET);
4510 }
4511 
4512 
4513 static int
4514 bce_reset(struct bce_softc *sc, u32 reset_code)
4515 {
4516 	u32 val;
4517 	int i, rc = 0;
4518 
4519 	DBENTER(BCE_VERBOSE_RESET);
4520 
4521 	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4522 		__FUNCTION__, reset_code);
4523 
4524 	/* Wait for pending PCI transactions to complete. */
4525 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4526 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4527 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4528 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4529 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4530 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4531 	DELAY(5);
4532 
4533 	/* Disable DMA */
4534 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4535 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4536 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4537 		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4538 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4539 	}
4540 
4541 	/* Assume bootcode is running. */
4542 	sc->bce_fw_timed_out = 0;
4543 
4544 	/* Give the firmware a chance to prepare for the reset. */
4545 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4546 	if (rc)
4547 		goto bce_reset_exit;
4548 
4549 	/* Set a firmware reminder that this is a soft reset. */
4550 	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);
4551 
4552 	/* Dummy read to force the chip to complete all current transactions. */
4553 	val = REG_RD(sc, BCE_MISC_ID);
4554 
4555 	/* Chip reset. */
4556 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4557 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4558 		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4559 		REG_RD(sc, BCE_MISC_COMMAND);
4560 		DELAY(5);
4561 
4562 		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4563 		      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4564 
4565 		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4566 	} else {
4567 		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4568 			BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4569 			BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4570 		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4571 
4572 		/* Allow up to 30us for reset to complete. */
4573 		for (i = 0; i < 10; i++) {
4574 			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4575 			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4576 				BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4577 				break;
4578 			}
4579 			DELAY(10);
4580 		}
4581 
4582 		/* Check that reset completed successfully. */
4583 		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4584 			BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4585 			BCE_PRINTF("%s(%d): Reset failed!\n",
4586 				__FILE__, __LINE__);
4587 			rc = EBUSY;
4588 			goto bce_reset_exit;
4589 		}
4590 	}
4591 
4592 	/* Make sure byte swapping is properly configured. */
4593 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4594 	if (val != 0x01020304) {
4595 		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4596 			__FILE__, __LINE__);
4597 		rc = ENODEV;
4598 		goto bce_reset_exit;
4599 	}
4600 
4601 	/* Just completed a reset, assume that firmware is running again. */
4602 	sc->bce_fw_timed_out = 0;
4603 
4604 	/* Wait for the firmware to finish its initialization. */
4605 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4606 	if (rc)
4607 		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
4608 			__FILE__, __LINE__);
4609 
4610 bce_reset_exit:
4611 	DBEXIT(BCE_VERBOSE_RESET);
4612 	return (rc);
4613 }
4614 
4615 
4616 static int
4617 bce_chipinit(struct bce_softc *sc)
4618 {
4619 	u32 val;
4620 	int rc = 0;
4621 
4622 	DBENTER(BCE_VERBOSE_RESET);
4623 
4624 	bce_disable_intr(sc);
4625 
4626 	/*
4627 	 * Initialize DMA byte/word swapping, configure the number of DMA
4628 	 * channels and PCI clock compensation delay.
4629 	 */
4630 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
4631 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
4632 #if BYTE_ORDER == BIG_ENDIAN
4633 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
4634 #endif
4635 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
4636 	      DMA_READ_CHANS << 12 |
4637 	      DMA_WRITE_CHANS << 16;
4638 
4639 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
4640 
4641 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
4642 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
4643 
4644 	/*
4645 	 * This setting resolves a problem observed on certain Intel PCI
4646 	 * chipsets that cannot handle multiple outstanding DMA operations.
4647 	 * See errata E9_5706A1_65.
4648 	 */
4649 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4650 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
4651 	    !(sc->bce_flags & BCE_PCIX_FLAG))
4652 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
4653 
4654 	REG_WR(sc, BCE_DMA_CONFIG, val);
4655 
4656 	/* Enable the RX_V2P and Context state machines before access. */
4657 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4658 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4659 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4660 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4661 
4662 	/* Initialize context mapping and zero out the quick contexts. */
4663 	bce_init_ctx(sc);
4664 
	/* Initialize the on-board CPUs. */
4666 	bce_init_cpus(sc);
4667 
4668 	/* Prepare NVRAM for access. */
4669 	if (bce_init_nvram(sc)) {
4670 		rc = ENODEV;
4671 		goto bce_chipinit_exit;
4672 	}
4673 
4674 	/* Set the kernel bypass block size */
4675 	val = REG_RD(sc, BCE_MQ_CONFIG);
4676 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4677 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4678 
	/* Enable bins used on the 5709/5716. */
4680 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4681 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4682 		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
4683 		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
4684 			val |= BCE_MQ_CONFIG_HALT_DIS;
4685 	}
4686 
4687 	REG_WR(sc, BCE_MQ_CONFIG, val);
4688 
4689 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4690 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
4691 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
4692 
4693 	/* Set the page size and clear the RV2P processor stall bits. */
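	/*
	 * For example, assuming the usual 4KiB chain pages
	 * (BCM_PAGE_BITS == 12), (BCM_PAGE_BITS - 8) << 24 programs a
	 * value of 4 into the page size field starting at bit 24.
	 */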
4694 	val = (BCM_PAGE_BITS - 8) << 24;
4695 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4696 
4697 	/* Configure page size. */
4698 	val = REG_RD(sc, BCE_TBDR_CONFIG);
4699 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
4700 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4701 	REG_WR(sc, BCE_TBDR_CONFIG, val);
4702 
4703 	/* Set the perfect match control register to default. */
4704 	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
4705 
4706 bce_chipinit_exit:
4707 	DBEXIT(BCE_VERBOSE_RESET);
4708 
4709 	return(rc);
4710 }
4711 
4712 
4713 /****************************************************************************/
4714 /* Initialize the controller in preparation to send/receive traffic.        */
4715 /*                                                                          */
4716 /* Returns:                                                                 */
4717 /*   0 for success, positive value for failure.                             */
4718 /****************************************************************************/
4719 static int
4720 bce_blockinit(struct bce_softc *sc)
4721 {
4722 	u32 reg, val;
4723 	int rc = 0;
4724 
4725 	DBENTER(BCE_VERBOSE_RESET);
4726 
4727 	/* Load the hardware default MAC address. */
4728 	bce_set_mac_addr(sc);
4729 
4730 	/* Set the Ethernet backoff seed value */
4731 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
4732 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
4733 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
4734 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
4735 
4736 	sc->last_status_idx = 0;
4737 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
4738 
4739 	/* Set up link change interrupt generation. */
4740 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
4741 
4742 	/* Program the physical address of the status block. */
4743 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
4744 		BCE_ADDR_LO(sc->status_block_paddr));
4745 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
4746 		BCE_ADDR_HI(sc->status_block_paddr));
4747 
4748 	/* Program the physical address of the statistics block. */
4749 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
4750 		BCE_ADDR_LO(sc->stats_block_paddr));
4751 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
4752 		BCE_ADDR_HI(sc->stats_block_paddr));
4753 
4754 	/* Program various host coalescing parameters. */
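	/*
	 * Most of the trip/tick registers below pack two 16-bit values:
	 * the "*_int" tunable in the upper half and the normal tunable
	 * in the lower half (the "_int" values presumably apply while an
	 * interrupt is outstanding).
	 */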
4755 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4756 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
4757 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4758 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
4759 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
4760 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
4761 	REG_WR(sc, BCE_HC_TX_TICKS,
4762 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
4763 	REG_WR(sc, BCE_HC_RX_TICKS,
4764 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
4765 	REG_WR(sc, BCE_HC_COM_TICKS,
4766 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
4767 	REG_WR(sc, BCE_HC_CMD_TICKS,
4768 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
4769 	REG_WR(sc, BCE_HC_STATS_TICKS,
4770 		(sc->bce_stats_ticks & 0xffff00));
4771 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4772 
4773 	/* Configure the Host Coalescing block. */
4774 	val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
4775 		      BCE_HC_CONFIG_COLLECT_STATS;
4776 
4777 #if 0
4778 	/* ToDo: Add MSI-X support. */
4779 	if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
4780 		u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
4781 			   BCE_HC_SB_CONFIG_1;
4782 
4783 		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
4784 
4785 		REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
4786 			BCE_HC_SB_CONFIG_1_ONE_SHOT);
4787 
4788 		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
4789 			(sc->tx_quick_cons_trip_int << 16) |
4790 			 sc->tx_quick_cons_trip);
4791 
4792 		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
4793 			(sc->tx_ticks_int << 16) | sc->tx_ticks);
4794 
4795 		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4796 	}
4797 
4798 	/*
4799 	 * Tell the HC block to automatically set the
4800 	 * INT_MASK bit after an MSI/MSI-X interrupt
4801 	 * is generated so the driver doesn't have to.
4802 	 */
4803 	if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
4804 		val |= BCE_HC_CONFIG_ONE_SHOT;
4805 
4806 	/* Set the MSI-X status blocks to 128 byte boundaries. */
4807 	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
4808 		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4809 #endif
4810 
4811 	REG_WR(sc, BCE_HC_CONFIG, val);
4812 
4813 	/* Clear the internal statistics counters. */
4814 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
4815 
4816 	/* Verify that bootcode is running. */
4817 	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
4818 
4819 	DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control),
4820 		BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
4821 			__FILE__, __LINE__);
4822 		reg = 0);
4823 
4824 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
4825 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
4826 		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
4827 			"Expected: 08%08X\n", __FILE__, __LINE__,
4828 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
4829 			BCE_DEV_INFO_SIGNATURE_MAGIC);
4830 		rc = ENODEV;
4831 		goto bce_blockinit_exit;
4832 	}
4833 
4834 	/* Enable DMA */
4835 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4836 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4837 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4838 		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4839 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4840 	}
4841 
4842 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
4843 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
4844 
4845 	/* Enable link state change interrupt generation. */
4846 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
4847 
4848 	/* Enable all remaining blocks in the MAC. */
4849 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
4850 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
4851 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT_XI);
4852 	else
4853 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4854 
4855 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4856 	DELAY(20);
4857 
4858 	/* Save the current host coalescing block settings. */
4859 	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
4860 
4861 bce_blockinit_exit:
4862 	DBEXIT(BCE_VERBOSE_RESET);
4863 
4864 	return (rc);
4865 }
4866 
4867 
4868 /****************************************************************************/
4869 /* Encapsulate an mbuf into the rx_bd chain.                                */
4870 /*                                                                          */
4871 /* Returns:                                                                 */
4872 /*   0 for success, positive value for failure.                             */
4873 /****************************************************************************/
4874 static int
4875 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
4876 	u16 *chain_prod, u32 *prod_bseq)
4877 {
4878 	bus_dmamap_t map;
4879 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4880 	struct mbuf *m_new = NULL;
4881 	struct rx_bd *rxbd;
4882 	int nsegs, error, rc = 0;
4883 #ifdef BCE_DEBUG
4884 	u16 debug_chain_prod = *chain_prod;
4885 #endif
4886 
4887 	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4888 
4889 	/* Make sure the inputs are valid. */
4890 	DBRUNIF((*chain_prod > MAX_RX_BD),
4891 		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
4892 		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
4893 
4894 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
4895 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4896 
4897 	/* Update some debug statistic counters */
4898 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4899 		sc->rx_low_watermark = sc->free_rx_bd);
4900 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4901 
4902 	/* Check whether this is a new mbuf allocation. */
4903 	if (m == NULL) {
4904 
4905 		/* Simulate an mbuf allocation failure. */
4906 		DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
4907 			sc->mbuf_alloc_failed_count++;
4908 			sc->mbuf_alloc_failed_sim_count++;
4909 			rc = ENOBUFS;
4910 			goto bce_get_rx_buf_exit);
4911 
4912 		/* This is a new mbuf allocation. */
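		/*
		 * With header splitting enabled only an mbuf header is
		 * needed here; the bulk of a large frame is carried by the
		 * page chain.  Otherwise allocate a full cluster, using a
		 * standard cluster when rx_bd_mbuf_alloc_size fits in
		 * MCLBYTES and a jumbo cluster when it does not.
		 */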
4913 #ifdef BCE_JUMBO_HDRSPLIT
4914 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
4915 #else
4916 		if (sc->rx_bd_mbuf_alloc_size <= MCLBYTES)
4917 			m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4918 		else
4919 			m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size);
4920 #endif
4921 
4922 		if (m_new == NULL) {
4923 			sc->mbuf_alloc_failed_count++;
4924 			rc = ENOBUFS;
4925 			goto bce_get_rx_buf_exit;
4926 		}
4927 
4928 		DBRUN(sc->debug_rx_mbuf_alloc++);
4929 	} else {
4930 		/* Reuse an existing mbuf. */
4931 		m_new = m;
4932 	}
4933 
4934 	/* Make sure we have a valid packet header. */
4935 	M_ASSERTPKTHDR(m_new);
4936 
4937 	/* Initialize the mbuf size and pad if necessary for alignment. */
4938 	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
4939 	m_adj(m_new, sc->rx_bd_mbuf_align_pad);
4940 
4941 	/* ToDo: Consider calling m_fragment() to test error handling. */
4942 
4943 	/* Map the mbuf cluster into device memory. */
4944 	map = sc->rx_mbuf_map[*chain_prod];
4945 	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
4946 	    segs, &nsegs, BUS_DMA_NOWAIT);
4947 
4948 	/* Handle any mapping errors. */
4949 	if (error) {
4950 		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain (%d)!\n",
4951 			__FILE__, __LINE__, error);
4952 
4953 		sc->dma_map_addr_rx_failed_count++;
4954 		m_freem(m_new);
4955 
4956 		DBRUN(sc->debug_rx_mbuf_alloc--);
4957 
4958 		rc = ENOBUFS;
4959 		goto bce_get_rx_buf_exit;
4960 	}
4961 
4962 	/* All mbufs must map to a single segment. */
4963 	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
4964 		 __FUNCTION__, nsegs));
4965 
4966 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
4967 
4968 	/* Setup the rx_bd for the segment. */
4969 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
4970 
4971 	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
4972 	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
4973 	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
4974 	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
4975 	*prod_bseq += segs[0].ds_len;
4976 
4977 	/* Save the mbuf and update our counter. */
4978 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
4979 	sc->free_rx_bd -= nsegs;
4980 
4981 	DBRUNMSG(BCE_INSANE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
4982 		nsegs));
4983 
4984 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
4985 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4986 
4987 bce_get_rx_buf_exit:
4988 	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4989 
4990 	return(rc);
4991 }
4992 
4993 
4994 #ifdef BCE_JUMBO_HDRSPLIT
4995 /****************************************************************************/
4996 /* Encapsulate an mbuf cluster into the page chain.                        */
4997 /*                                                                          */
4998 /* Returns:                                                                 */
4999 /*   0 for success, positive value for failure.                             */
5000 /****************************************************************************/
5001 static int
5002 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
5003 	u16 *prod_idx)
5004 {
5005 	bus_dmamap_t map;
5006 	bus_addr_t busaddr;
5007 	struct mbuf *m_new = NULL;
5008 	struct rx_bd *pgbd;
5009 	int error, rc = 0;
5010 #ifdef BCE_DEBUG
5011 	u16 debug_prod_idx = *prod_idx;
5012 #endif
5013 
5014 	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5015 
5016 	/* Make sure the inputs are valid. */
5017 	DBRUNIF((*prod_idx > MAX_PG_BD),
5018 		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
5019 		__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
5020 
5021 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5022 		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5023 
5024 	/* Update counters if we've hit a new low or run out of pages. */
5025 	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
5026 		sc->pg_low_watermark = sc->free_pg_bd);
5027 	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
5028 
5029 	/* Check whether this is a new mbuf allocation. */
5030 	if (m == NULL) {
5031 
5032 		/* Simulate an mbuf allocation failure. */
5033 		DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5034 			sc->mbuf_alloc_failed_count++;
5035 			sc->mbuf_alloc_failed_sim_count++;
5036 			rc = ENOBUFS;
5037 			goto bce_get_pg_buf_exit);
5038 
5039 		/* This is a new mbuf allocation. */
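		/*
		 * The cluster is allocated without M_PKTHDR (flags == 0);
		 * page chain mbufs are later concatenated onto a packet
		 * header mbuf from the RX chain by m_cat() in bce_rx_intr().
		 */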
5040 		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
5041 		if (m_new == NULL) {
5042 			sc->mbuf_alloc_failed_count++;
5043 			rc = ENOBUFS;
5044 			goto bce_get_pg_buf_exit;
5045 		}
5046 
5047 		DBRUN(sc->debug_pg_mbuf_alloc++);
5048 	} else {
5049 		/* Reuse an existing mbuf. */
5050 		m_new = m;
5051 		m_new->m_data = m_new->m_ext.ext_buf;
5052 	}
5053 
5054 	m_new->m_len = sc->pg_bd_mbuf_alloc_size;
5055 
5056 	/* ToDo: Consider calling m_fragment() to test error handling. */
5057 
5058 	/* Map the mbuf cluster into device memory. */
5059 	map = sc->pg_mbuf_map[*prod_idx];
5060 	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
5061 	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
5062 
5063 	/* Handle any mapping errors. */
5064 	if (error) {
5065 		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
5066 			__FILE__, __LINE__);
5067 
5068 		m_freem(m_new);
5069 		DBRUN(sc->debug_pg_mbuf_alloc--);
5070 
5071 		rc = ENOBUFS;
5072 		goto bce_get_pg_buf_exit;
5073 	}
5074 
5075 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
5076 
5077 	/*
5078 	 * The page chain uses the same rx_bd data structure
5079 	 * as the receive chain but doesn't require a byte sequence (bseq).
5080 	 */
5081 	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
5082 
5083 	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(busaddr));
5084 	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(busaddr));
5085 	pgbd->rx_bd_len       = htole32(sc->pg_bd_mbuf_alloc_size);
5086 	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5087 
5088 	/* Save the mbuf and update our counter. */
5089 	sc->pg_mbuf_ptr[*prod_idx] = m_new;
5090 	sc->free_pg_bd--;
5091 
5092 	DBRUNMSG(BCE_INSANE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
5093 		1));
5094 
5095 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5096 		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5097 
5098 bce_get_pg_buf_exit:
5099 	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5100 
5101 	return(rc);
5102 }
5103 #endif /* BCE_JUMBO_HDRSPLIT */
5104 
5105 /****************************************************************************/
5106 /* Initialize the TX context memory.                                        */
5107 /*                                                                          */
5108 /* Returns:                                                                 */
5109 /*   Nothing                                                                */
5110 /****************************************************************************/
5111 static void
5112 bce_init_tx_context(struct bce_softc *sc)
5113 {
5114 	u32 val;
5115 
5116 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5117 
5118 	/* Initialize the context ID for an L2 TX chain. */
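	/*
	 * GET_CID_ADDR(TX_CID) yields the base offset of the TX
	 * connection's context memory; each CTX_WR() below stores one
	 * 32-bit word at a field offset within that context.
	 */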
5119 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5120 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5121 		/* Set the CID type to support an L2 connection. */
5122 		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5123 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5124 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5125 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val);
5126 
5127 		/* Point the hardware to the first page in the chain. */
5128 		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5129 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5130 		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5131 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5132 	} else {
5133 		/* Set the CID type to support an L2 connection. */
5134 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5135 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5136 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5137 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5138 
5139 		/* Point the hardware to the first page in the chain. */
5140 		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5141 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5142 		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5143 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5144 	}
5145 
5146 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5147 }
5148 
5149 
5150 /****************************************************************************/
5151 /* Allocate memory and initialize the TX data structures.                   */
5152 /*                                                                          */
5153 /* Returns:                                                                 */
5154 /*   0 for success, positive value for failure.                             */
5155 /****************************************************************************/
5156 static int
5157 bce_init_tx_chain(struct bce_softc *sc)
5158 {
5159 	struct tx_bd *txbd;
5160 	int i, rc = 0;
5161 
5162 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5163 
5164 	/* Set the initial TX producer/consumer indices. */
5165 	sc->tx_prod        = 0;
5166 	sc->tx_cons        = 0;
5167 	sc->tx_prod_bseq   = 0;
5168 	sc->used_tx_bd     = 0;
5169 	sc->max_tx_bd      = USABLE_TX_BD;
5170 	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
5171 	DBRUN(sc->tx_full_count = 0);
5172 
5173 	/*
	 * The NetXtreme II supports a linked-list structure called
5175 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
5176 	 * consists of a series of 1 or more chain pages, each of which
5177 	 * consists of a fixed number of BD entries.
5178 	 * The last BD entry on each page is a pointer to the next page
5179 	 * in the chain, and the last pointer in the BD chain
5180 	 * points back to the beginning of the chain.
5181 	 */
5182 
5183 	/* Set the TX next pointer chain entries. */
5184 	for (i = 0; i < TX_PAGES; i++) {
5185 		int j;
5186 
5187 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5188 
5189 		/* Check if we've reached the last page. */
5190 		if (i == (TX_PAGES - 1))
5191 			j = 0;
5192 		else
5193 			j = i + 1;
5194 
5195 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5196 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5197 	}
5198 
5199 	bce_init_tx_context(sc);
5200 
5201 	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
5202 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5203 
5204 	return(rc);
5205 }
5206 
5207 
5208 /****************************************************************************/
5209 /* Free memory and clear the TX data structures.                            */
5210 /*                                                                          */
5211 /* Returns:                                                                 */
5212 /*   Nothing.                                                               */
5213 /****************************************************************************/
5214 static void
5215 bce_free_tx_chain(struct bce_softc *sc)
5216 {
5217 	int i;
5218 
5219 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5220 
5221 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5222 	for (i = 0; i < TOTAL_TX_BD; i++) {
5223 		if (sc->tx_mbuf_ptr[i] != NULL) {
5224 			if (sc->tx_mbuf_map[i] != NULL)
5225 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
5226 					BUS_DMASYNC_POSTWRITE);
5227 			m_freem(sc->tx_mbuf_ptr[i]);
5228 			sc->tx_mbuf_ptr[i] = NULL;
5229 			DBRUN(sc->debug_tx_mbuf_alloc--);
5230 		}
5231 	}
5232 
5233 	/* Clear each TX chain page. */
5234 	for (i = 0; i < TX_PAGES; i++)
5235 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5236 
5237 	sc->used_tx_bd     = 0;
5238 
5239 	/* Check if we lost any mbufs in the process. */
5240 	DBRUNIF((sc->debug_tx_mbuf_alloc),
5241 		BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5242 			"from tx chain!\n",
5243 			__FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
5244 
5245 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5246 }
5247 
5248 
5249 /****************************************************************************/
5250 /* Initialize the RX context memory.                                        */
5251 /*                                                                          */
5252 /* Returns:                                                                 */
5253 /*   Nothing                                                                */
5254 /****************************************************************************/
5255 static void
5256 bce_init_rx_context(struct bce_softc *sc)
5257 {
5258 	u32 val;
5259 
5260 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5261 
5262 	/* Initialize the type, size, and BD cache levels for the RX context. */
5263 	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5264 		BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5265 		(0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5266 
5267 	/*
5268 	 * Set the level for generating pause frames
5269 	 * when the number of available rx_bd's gets
5270 	 * too low (the low watermark) and the level
5271 	 * when pause frames can be stopped (the high
5272 	 * watermark).
5273 	 */
5274 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5275 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5276 		u32 lo_water, hi_water;
5277 
5278 		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5279 		hi_water = USABLE_RX_BD / 4;
5280 
5281 		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5282 		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5283 
5284 		if (hi_water > 0xf)
5285 			hi_water = 0xf;
5286 		else if (hi_water == 0)
5287 			lo_water = 0;
5288 		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5289 			(hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5290 	}
5291 
5292  	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5293 
5294 	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5295 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5296 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5297 		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5298 		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5299 	}
5300 
5301 	/* Point the hardware to the first page in the chain. */
5302 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5303 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5304 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5305 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5306 
5307 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5308 }
5309 
5310 
5311 /****************************************************************************/
5312 /* Allocate memory and initialize the RX data structures.                   */
5313 /*                                                                          */
5314 /* Returns:                                                                 */
5315 /*   0 for success, positive value for failure.                             */
5316 /****************************************************************************/
5317 static int
5318 bce_init_rx_chain(struct bce_softc *sc)
5319 {
5320 	struct rx_bd *rxbd;
5321 	int i, rc = 0;
5322 
5323 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5324 		BCE_VERBOSE_CTX);
5325 
5326 	/* Initialize the RX producer and consumer indices. */
5327 	sc->rx_prod        = 0;
5328 	sc->rx_cons        = 0;
5329 	sc->rx_prod_bseq   = 0;
5330 	sc->free_rx_bd     = USABLE_RX_BD;
5331 	sc->max_rx_bd      = USABLE_RX_BD;
5332 	DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
5333 	DBRUN(sc->rx_empty_count = 0);
5334 
5335 	/* Initialize the RX next pointer chain entries. */
5336 	for (i = 0; i < RX_PAGES; i++) {
5337 		int j;
5338 
5339 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5340 
5341 		/* Check if we've reached the last page. */
5342 		if (i == (RX_PAGES - 1))
5343 			j = 0;
5344 		else
5345 			j = i + 1;
5346 
5347 		/* Setup the chain page pointers. */
5348 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5349 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5350 	}
5351 
5352 	/* Fill up the RX chain. */
5353 	bce_fill_rx_chain(sc);
5354 
5355 	for (i = 0; i < RX_PAGES; i++) {
5356 		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
5357 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5358 	}
5359 
5360 	bce_init_rx_context(sc);
5361 
5362 	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
5363 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5364 		BCE_VERBOSE_CTX);
5365 	/* ToDo: Are there possible failure modes here? */
5366 	return(rc);
5367 }
5368 
5369 
5370 /****************************************************************************/
/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
5372 /* occurs.                                                                  */
5373 /*                                                                          */
5374 /* Returns:                                                                 */
5375 /*   Nothing                                                                */
5376 /****************************************************************************/
5377 static void
5378 bce_fill_rx_chain(struct bce_softc *sc)
5379 {
5380 	u16 prod, prod_idx;
5381 	u32 prod_bseq;
5382 
5383 	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5384 		BCE_VERBOSE_CTX);
5385 
5386 	/* Get the RX chain producer indices. */
5387 	prod      = sc->rx_prod;
5388 	prod_bseq = sc->rx_prod_bseq;
5389 
5390 	/* Keep filling the RX chain until it's full. */
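	/*
	 * prod is the running 16-bit producer sequence; RX_CHAIN_IDX()
	 * converts it to an actual rx_bd slot and NEXT_RX_BD() advances it
	 * past the next-page pointer entries (see the DBRUNIF check below).
	 */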
5391 	while (sc->free_rx_bd > 0) {
5392 		prod_idx = RX_CHAIN_IDX(prod);
5393 		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
5394 			/* Bail out if we can't add an mbuf to the chain. */
5395 			break;
5396 		}
5397 		prod = NEXT_RX_BD(prod);
5398 	}
5399 
5400 	/* Save the RX chain producer indices. */
5401 	sc->rx_prod      = prod;
5402 	sc->rx_prod_bseq = prod_bseq;
5403 
5404 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5405 		BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5406 		__FUNCTION__, sc->rx_prod));
5407 
5408 	/* Write the mailbox and tell the chip about the waiting rx_bd's. */
5409 	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
5410 		sc->rx_prod);
5411 	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
5412 		sc->rx_prod_bseq);
5413 
5414 	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5415 		BCE_VERBOSE_CTX);
5416 }
5417 
5418 
5419 /****************************************************************************/
5420 /* Free memory and clear the RX data structures.                            */
5421 /*                                                                          */
5422 /* Returns:                                                                 */
5423 /*   Nothing.                                                               */
5424 /****************************************************************************/
5425 static void
5426 bce_free_rx_chain(struct bce_softc *sc)
5427 {
5428 	int i;
5429 
5430 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5431 
5432 	/* Free any mbufs still in the RX mbuf chain. */
5433 	for (i = 0; i < TOTAL_RX_BD; i++) {
5434 		if (sc->rx_mbuf_ptr[i] != NULL) {
5435 			if (sc->rx_mbuf_map[i] != NULL)
5436 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
5437 					BUS_DMASYNC_POSTREAD);
5438 			m_freem(sc->rx_mbuf_ptr[i]);
5439 			sc->rx_mbuf_ptr[i] = NULL;
5440 			DBRUN(sc->debug_rx_mbuf_alloc--);
5441 		}
5442 	}
5443 
5444 	/* Clear each RX chain page. */
5445 	for (i = 0; i < RX_PAGES; i++)
5446 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
5447 
5448 	sc->free_rx_bd = sc->max_rx_bd;
5449 
5450 	/* Check if we lost any mbufs in the process. */
5451 	DBRUNIF((sc->debug_rx_mbuf_alloc),
5452 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5453 			__FUNCTION__, sc->debug_rx_mbuf_alloc));
5454 
5455 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5456 }
5457 
5458 
5459 #ifdef BCE_JUMBO_HDRSPLIT
5460 /****************************************************************************/
5461 /* Allocate memory and initialize the page data structures.                 */
5462 /* Assumes that bce_init_rx_chain() has not already been called.            */
5463 /*                                                                          */
5464 /* Returns:                                                                 */
5465 /*   0 for success, positive value for failure.                             */
5466 /****************************************************************************/
5467 static int
5468 bce_init_pg_chain(struct bce_softc *sc)
5469 {
5470 	struct rx_bd *pgbd;
5471 	int i, rc = 0;
5472 	u32 val;
5473 
5474 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5475 		BCE_VERBOSE_CTX);
5476 
5477 	/* Initialize the page producer and consumer indices. */
5478 	sc->pg_prod        = 0;
5479 	sc->pg_cons        = 0;
5480 	sc->free_pg_bd     = USABLE_PG_BD;
5481 	sc->max_pg_bd      = USABLE_PG_BD;
5482 	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5483 	DBRUN(sc->pg_empty_count = 0);
5484 
5485 	/* Initialize the page next pointer chain entries. */
5486 	for (i = 0; i < PG_PAGES; i++) {
5487 		int j;
5488 
5489 		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5490 
5491 		/* Check if we've reached the last page. */
5492 		if (i == (PG_PAGES - 1))
5493 			j = 0;
5494 		else
5495 			j = i + 1;
5496 
5497 		/* Setup the chain page pointers. */
5498 		pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5499 		pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5500 	}
5501 
5502 	/* Setup the MQ BIN mapping for host_pg_bidx. */
5503 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
5504 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
5505 		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5506 
5507 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5508 
5509 	/* Configure the rx_bd and page chain mbuf cluster size. */
5510 	val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
5511 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
5512 
5513 	/* Configure the context reserved for jumbo support. */
5514 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5515 		BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5516 
5517 	/* Point the hardware to the first page in the page chain. */
5518 	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5519 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5520 	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5521 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5522 
5523 	/* Fill up the page chain. */
5524 	bce_fill_pg_chain(sc);
5525 
5526 	for (i = 0; i < PG_PAGES; i++) {
5527 		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
5528 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5529 	}
5530 
5531 	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
5532 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5533 		BCE_VERBOSE_CTX);
5534 	return(rc);
5535 }
5536 
5537 
5538 /****************************************************************************/
/* Add mbufs to the page chain until it's full or an mbuf allocation error  */
5540 /* occurs.                                                                  */
5541 /*                                                                          */
5542 /* Returns:                                                                 */
5543 /*   Nothing                                                                */
5544 /****************************************************************************/
5545 static void
5546 bce_fill_pg_chain(struct bce_softc *sc)
5547 {
5548 	u16 prod, prod_idx;
5549 
5550 	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5551 		BCE_VERBOSE_CTX);
5552 
	/* Get the page chain producer index. */
5554 	prod = sc->pg_prod;
5555 
5556 	/* Keep filling the page chain until it's full. */
5557 	while (sc->free_pg_bd > 0) {
5558 		prod_idx = PG_CHAIN_IDX(prod);
5559 		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
5560 			/* Bail out if we can't add an mbuf to the chain. */
5561 			break;
5562 		}
5563 		prod = NEXT_PG_BD(prod);
5564 	}
5565 
5566 	/* Save the page chain producer index. */
5567 	sc->pg_prod = prod;
5568 
5569 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5570 		BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5571 		__FUNCTION__, sc->pg_prod));
5572 
5573 	/*
5574 	 * Write the mailbox and tell the chip about
5575 	 * the new rx_bd's in the page chain.
5576 	 */
5577 	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
5578 		sc->pg_prod);
5579 
5580 	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5581 		BCE_VERBOSE_CTX);
5582 }
5583 
5584 
5585 /****************************************************************************/
5586 /* Free memory and clear the RX data structures.                            */
5587 /*                                                                          */
5588 /* Returns:                                                                 */
5589 /*   Nothing.                                                               */
5590 /****************************************************************************/
5591 static void
5592 bce_free_pg_chain(struct bce_softc *sc)
5593 {
5594 	int i;
5595 
5596 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5597 
5598 	/* Free any mbufs still in the mbuf page chain. */
5599 	for (i = 0; i < TOTAL_PG_BD; i++) {
5600 		if (sc->pg_mbuf_ptr[i] != NULL) {
5601 			if (sc->pg_mbuf_map[i] != NULL)
5602 				bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
5603 					BUS_DMASYNC_POSTREAD);
5604 			m_freem(sc->pg_mbuf_ptr[i]);
5605 			sc->pg_mbuf_ptr[i] = NULL;
5606 			DBRUN(sc->debug_pg_mbuf_alloc--);
5607 		}
5608 	}
5609 
	/* Clear each page chain page. */
5611 	for (i = 0; i < PG_PAGES; i++)
5612 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
5613 
5614 	sc->free_pg_bd = sc->max_pg_bd;
5615 
5616 	/* Check if we lost any mbufs in the process. */
5617 	DBRUNIF((sc->debug_pg_mbuf_alloc),
5618 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
5619 			__FUNCTION__, sc->debug_pg_mbuf_alloc));
5620 
5621 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5622 }
5623 #endif /* BCE_JUMBO_HDRSPLIT */
5624 
5625 
5626 /****************************************************************************/
5627 /* Set media options.                                                       */
5628 /*                                                                          */
5629 /* Returns:                                                                 */
5630 /*   0 for success, positive value for failure.                             */
5631 /****************************************************************************/
5632 static int
5633 bce_ifmedia_upd(struct ifnet *ifp)
5634 {
5635 	struct bce_softc *sc = ifp->if_softc;
5636 
5637 	DBENTER(BCE_VERBOSE);
5638 
5639 	BCE_LOCK(sc);
5640 	bce_ifmedia_upd_locked(ifp);
5641 	BCE_UNLOCK(sc);
5642 
5643 	DBEXIT(BCE_VERBOSE);
5644 	return (0);
5645 }
5646 
5647 
5648 /****************************************************************************/
5649 /* Set media options.                                                       */
5650 /*                                                                          */
5651 /* Returns:                                                                 */
5652 /*   Nothing.                                                               */
5653 /****************************************************************************/
5654 static void
5655 bce_ifmedia_upd_locked(struct ifnet *ifp)
5656 {
5657 	struct bce_softc *sc = ifp->if_softc;
5658 	struct mii_data *mii;
5659 
5660 	DBENTER(BCE_VERBOSE);
5661 
5662 	BCE_LOCK_ASSERT(sc);
5663 
5664 	mii = device_get_softc(sc->bce_miibus);
5665 
5666 	/* Make sure the MII bus has been enumerated. */
5667 	if (mii) {
5668 		sc->bce_link = 0;
5669 		if (mii->mii_instance) {
5670 			struct mii_softc *miisc;
5671 
5672 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5673 				mii_phy_reset(miisc);
5674 		}
5675 		mii_mediachg(mii);
5676 	}
5677 
5678 	DBEXIT(BCE_VERBOSE);
5679 }
5680 
5681 
5682 /****************************************************************************/
5683 /* Reports current media status.                                            */
5684 /*                                                                          */
5685 /* Returns:                                                                 */
5686 /*   Nothing.                                                               */
5687 /****************************************************************************/
5688 static void
5689 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5690 {
5691 	struct bce_softc *sc = ifp->if_softc;
5692 	struct mii_data *mii;
5693 
5694 	DBENTER(BCE_VERBOSE);
5695 
5696 	BCE_LOCK(sc);
5697 
5698 	mii = device_get_softc(sc->bce_miibus);
5699 
5700 	mii_pollstat(mii);
5701 	ifmr->ifm_active = mii->mii_media_active;
5702 	ifmr->ifm_status = mii->mii_media_status;
5703 
5704 	BCE_UNLOCK(sc);
5705 
5706 	DBEXIT(BCE_VERBOSE);
5707 }
5708 
5709 
5710 /****************************************************************************/
5711 /* Handles PHY generated interrupt events.                                  */
5712 /*                                                                          */
5713 /* Returns:                                                                 */
5714 /*   Nothing.                                                               */
5715 /****************************************************************************/
5716 static void
5717 bce_phy_intr(struct bce_softc *sc)
5718 {
5719 	u32 new_link_state, old_link_state;
5720 
5721 	DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5722 
5723 	new_link_state = sc->status_block->status_attn_bits &
5724 		STATUS_ATTN_BITS_LINK_STATE;
5725 	old_link_state = sc->status_block->status_attn_bits_ack &
5726 		STATUS_ATTN_BITS_LINK_STATE;
5727 
5728 	/* Handle any changes if the link state has changed. */
5729 	if (new_link_state != old_link_state) {
5730 
5731 		/* Update the status_attn_bits_ack field in the status block. */
5732 		if (new_link_state) {
5733 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
5734 				STATUS_ATTN_BITS_LINK_STATE);
5735 			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
5736 				__FUNCTION__);
5737 		}
5738 		else {
5739 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
5740 				STATUS_ATTN_BITS_LINK_STATE);
5741 			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
5742 				__FUNCTION__);
5743 		}
5744 
5745 		/*
5746 		 * Assume link is down and allow
5747 		 * tick routine to update the state
5748 		 * based on the actual media state.
5749 		 */
5750 		sc->bce_link = 0;
5751 		callout_stop(&sc->bce_tick_callout);
5752 		bce_tick(sc);
5753 	}
5754 
5755 	/* Acknowledge the link change interrupt. */
5756 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
5757 
5758 	DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5759 }
5760 
5761 
5762 /****************************************************************************/
5763 /* Reads the receive consumer value from the status block (skipping over    */
5764 /* chain page pointer if necessary).                                        */
5765 /*                                                                          */
5766 /* Returns:                                                                 */
5767 /*   hw_cons                                                                */
5768 /****************************************************************************/
5769 static inline u16
5770 bce_get_hw_rx_cons(struct bce_softc *sc)
5771 {
5772 	u16 hw_cons;
5773 
5774 	rmb();
5775 	hw_cons = sc->status_block->status_rx_quick_consumer_index0;
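	/*
	 * The hardware index counts the next-page pointer slots too; when
	 * it lands on one (e.g. low bits all set, assuming the usual 255
	 * usable entries per page), step past it to the first real rx_bd
	 * of the next page.
	 */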
5776 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5777 		hw_cons++;
5778 
5779 	return hw_cons;
5780 }
5781 
5782 /****************************************************************************/
5783 /* Handles received frame interrupt events.                                 */
5784 /*                                                                          */
5785 /* Returns:                                                                 */
5786 /*   Nothing.                                                               */
5787 /****************************************************************************/
5788 static void
5789 bce_rx_intr(struct bce_softc *sc)
5790 {
5791 	struct ifnet *ifp = sc->bce_ifp;
5792 	struct l2_fhdr *l2fhdr;
5793 	unsigned int pkt_len;
5794 	u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
5795 	u32 status;
5796 #ifdef BCE_JUMBO_HDRSPLIT
5797 	unsigned int rem_len;
5798 	u16 sw_pg_cons, sw_pg_cons_idx;
5799 #endif
5800 
5801 	DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5802 	DBRUN(sc->rx_interrupts++);
5803 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
5804 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5805 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5806 
5807 	/* Prepare the RX chain pages to be accessed by the host CPU. */
5808 	for (int i = 0; i < RX_PAGES; i++)
5809 		bus_dmamap_sync(sc->rx_bd_chain_tag,
5810 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5811 
5812 #ifdef BCE_JUMBO_HDRSPLIT
5813 	/* Prepare the page chain pages to be accessed by the host CPU. */
5814 	for (int i = 0; i < PG_PAGES; i++)
5815 		bus_dmamap_sync(sc->pg_bd_chain_tag,
5816 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5817 #endif
5818 
5819 	/* Get the hardware's view of the RX consumer index. */
5820 	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5821 
5822 	/* Get working copies of the driver's view of the consumer indices. */
5823 	sw_rx_cons = sc->rx_cons;
5824 #ifdef BCE_JUMBO_HDRSPLIT
5825 	sw_pg_cons = sc->pg_cons;
5826 #endif
5827 
5828 	/* Update some debug statistics counters */
5829 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5830 		sc->rx_low_watermark = sc->free_rx_bd);
5831 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
5832 
5833 	/* Scan through the receive chain as long as there is work to do */
5834 	/* ToDo: Consider setting a limit on the number of packets processed. */
5835 	rmb();
5836 	while (sw_rx_cons != hw_rx_cons) {
5837 		struct mbuf *m0;
5838 
5839 		/* Convert the producer/consumer indices to an actual rx_bd index. */
5840 		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
5841 
5842 		/* Unmap the mbuf from DMA space. */
5843 		bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[sw_rx_cons_idx],
5844 		    BUS_DMASYNC_POSTREAD);
5845 		bus_dmamap_unload(sc->rx_mbuf_tag,
5846 		    sc->rx_mbuf_map[sw_rx_cons_idx]);
5847 
5848 		/* Remove the mbuf from the RX chain. */
5849 		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
5850 		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
5851 		DBRUN(sc->debug_rx_mbuf_alloc--);
5852 		sc->free_rx_bd++;
5853 
5854 		/*
		 * Frames received on the NetXtreme II are prepended with an
5856 		 * l2_fhdr structure which provides status information about
5857 		 * the received frame (including VLAN tags and checksum info).
5858 		 * The frames are also automatically adjusted to align the IP
5859 		 * header (i.e. two null bytes are inserted before the Ethernet
5860 		 * header).  As a result the data DMA'd by the controller into
5861 		 * the mbuf is as follows:
5862 		 *
5863 		 * +---------+-----+---------------------+-----+
5864 		 * | l2_fhdr | pad | packet data         | FCS |
5865 		 * +---------+-----+---------------------+-----+
5866 		 *
5867 		 * The l2_fhdr needs to be checked and skipped and the FCS needs
5868 		 * to be stripped before sending the packet up the stack.
5869 		 */
5870 		l2fhdr  = mtod(m0, struct l2_fhdr *);
5871 
5872 		/* Get the packet data + FCS length and the status. */
5873 		pkt_len = l2fhdr->l2_fhdr_pkt_len;
5874 		status  = l2fhdr->l2_fhdr_status;
5875 
5876 		/*
5877 		 * Skip over the l2_fhdr and pad, resulting in the
5878 		 * following data in the mbuf:
5879 		 * +---------------------+-----+
5880 		 * | packet data         | FCS |
5881 		 * +---------------------+-----+
5882 		 */
5883 		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
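		/*
		 * For example, assuming a 16-byte l2_fhdr and the two-byte
		 * ETHER_ALIGN pad, the m_adj() above trims 18 bytes so the
		 * mbuf data now begins at the Ethernet header.
		 */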
5884 
5885 #ifdef BCE_JUMBO_HDRSPLIT
5886 		/*
5887 		 * Check whether the received frame fits in a single
5888 		 * mbuf or not (i.e. packet data + FCS <=
5889 		 * sc->rx_bd_mbuf_data_len bytes).
5890 		 */
5891 		if (pkt_len > m0->m_len) {
5892 			/*
5893 			 * The received frame is larger than a single mbuf.
5894 			 * If the frame was a TCP frame then only the TCP
5895 			 * header is placed in the mbuf, the remaining
5896 			 * payload (including FCS) is placed in the page
5897 			 * chain, the SPLIT flag is set, and the header
5898 			 * length is placed in the IP checksum field.
5899 			 * If the frame is not a TCP frame then the mbuf
5900 			 * is filled and the remaining bytes are placed
5901 			 * in the page chain.
5902 			 */
5903 
5904 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n",
5905 				__FUNCTION__);
5906 
5907 			/*
5908 			 * When the page chain is enabled and the TCP
5909 			 * header has been split from the TCP payload,
5910 			 * the ip_xsum structure will reflect the length
5911 			 * of the TCP header, not the IP checksum.  Set
5912 			 * the packet length of the mbuf accordingly.
5913 			 */
5914 		 	if (status & L2_FHDR_STATUS_SPLIT)
5915 				m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
5916 
5917 			rem_len = pkt_len - m0->m_len;
5918 
5919 			/* Pull mbufs off the page chain for the remaining data. */
5920 			while (rem_len > 0) {
5921 				struct mbuf *m_pg;
5922 
5923 				sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
5924 
5925 				/* Remove the mbuf from the page chain. */
5926 				m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
5927 				sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
5928 				DBRUN(sc->debug_pg_mbuf_alloc--);
5929 				sc->free_pg_bd++;
5930 
5931 				/* Unmap the page chain mbuf from DMA space. */
5932 				bus_dmamap_sync(sc->pg_mbuf_tag,
5933 					sc->pg_mbuf_map[sw_pg_cons_idx],
5934 					BUS_DMASYNC_POSTREAD);
5935 				bus_dmamap_unload(sc->pg_mbuf_tag,
5936 					sc->pg_mbuf_map[sw_pg_cons_idx]);
5937 
5938 				/* Adjust the mbuf length. */
5939 				if (rem_len < m_pg->m_len) {
5940 					/* The mbuf chain is complete. */
5941 					m_pg->m_len = rem_len;
5942 					rem_len = 0;
5943 				} else {
5944 					/* More packet data is waiting. */
5945 					rem_len -= m_pg->m_len;
5946 				}
5947 
5948 				/* Concatenate the mbuf cluster to the mbuf. */
5949 				m_cat(m0, m_pg);
5950 
5951 				sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
5952 			}
5953 
5954 			/* Set the total packet length. */
5955 			m0->m_pkthdr.len = pkt_len;
5956 
5957 		} else {
5958 			/*
5959 			 * The received packet is small and fits in a
5960 			 * single mbuf (i.e. the l2_fhdr + pad + packet +
5961 			 * FCS <= MHLEN).  In other words, the packet is
5962 			 * 154 bytes or less in size.
5963 			 */
5964 
5965 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n",
5966 				__FUNCTION__);
5967 
5968 			/* Set the total packet length. */
5969 			m0->m_pkthdr.len = m0->m_len = pkt_len;
5970 		}
5971 #else
		/* Set the total packet length. */
5973 		m0->m_pkthdr.len = m0->m_len = pkt_len;
5974 #endif
5975 
5976 		/* Remove the trailing Ethernet FCS. */
5977 		m_adj(m0, -ETHER_CRC_LEN);
5978 
5979 		/* Check that the resulting mbuf chain is valid. */
5980 		DBRUN(m_sanity(m0, FALSE));
5981 		DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
5982 			(m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
5983 			BCE_PRINTF("Invalid Ethernet frame size!\n");
5984 			m_print(m0, 128));
5985 
5986 		DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
5987 			BCE_PRINTF("Simulating l2_fhdr status error.\n");
5988 			sc->l2fhdr_error_sim_count++;
5989 			status = status | L2_FHDR_ERRORS_PHY_DECODE);
5990 
5991 		/* Check the received frame for errors. */
5992 		if (status & (L2_FHDR_ERRORS_BAD_CRC |
5993 			L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
5994 			L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
5995 
5996 			/* Log the error and release the mbuf. */
5997 			ifp->if_ierrors++;
5998 			sc->l2fhdr_error_count++;
5999 
6000 			m_freem(m0);
6001 			m0 = NULL;
6002 			goto bce_rx_int_next_rx;
6003 		}
6004 
6005 		/* Send the packet to the appropriate interface. */
6006 		m0->m_pkthdr.rcvif = ifp;
6007 
6008 		/* Assume no hardware checksum. */
6009 		m0->m_pkthdr.csum_flags = 0;
6010 
6011 		/* Validate the checksum if offload enabled. */
6012 		if (ifp->if_capenable & IFCAP_RXCSUM) {
6013 
6014 			/* Check for an IP datagram. */
6015 		 	if (!(status & L2_FHDR_STATUS_SPLIT) &&
6016 				(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
6017 				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
6018 
6019 				/* Check if the IP checksum is valid. */
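				/*
				 * The hardware presumably reports the one's
				 * complement sum over the IP header; a valid
				 * header sums to 0xffff, so the XOR below is
				 * zero exactly when the checksum is good.
				 */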
6020 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
6021 					m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
6022 			}
6023 
6024 			/* Check for a valid TCP/UDP frame. */
6025 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
6026 				L2_FHDR_STATUS_UDP_DATAGRAM)) {
6027 
6028 				/* Check for a good TCP/UDP checksum. */
6029 				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
6030 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
6031 					m0->m_pkthdr.csum_data =
6032 					    l2fhdr->l2_fhdr_tcp_udp_xsum;
6033 					m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
6034 						| CSUM_PSEUDO_HDR);
6035 				}
6036 			}
6037 		}
6038 
6039 		/* Attach the VLAN tag.	*/
6040 		if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
6041 #if __FreeBSD_version < 700000
6042 			VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
6043 #else
6044 			m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
6045 			m0->m_flags |= M_VLANTAG;
6046 #endif
6047 		}
6048 
6049 		/* Increment received packet statistics. */
6050 		ifp->if_ipackets++;
6051 
6052 bce_rx_int_next_rx:
6053 		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
6054 
6055 		/* If we have a packet, pass it up the stack */
6056 		if (m0) {
6057 			/* Make sure we don't lose our place when we release the lock. */
6058 			sc->rx_cons = sw_rx_cons;
6059 #ifdef BCE_JUMBO_HDRSPLIT
6060 			sc->pg_cons = sw_pg_cons;
6061 #endif
6062 
6063 			BCE_UNLOCK(sc);
6064 			(*ifp->if_input)(ifp, m0);
6065 			BCE_LOCK(sc);
6066 
6067 			/* Recover our place. */
6068 			sw_rx_cons = sc->rx_cons;
6069 #ifdef BCE_JUMBO_HDRSPLIT
6070 			sw_pg_cons = sc->pg_cons;
6071 #endif
6072 		}
6073 
6074 		/* Refresh hw_cons to see if there's new work */
6075 		if (sw_rx_cons == hw_rx_cons)
6076 			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
6077 	}
6078 
6079 	/* No new packets to process.  Refill the RX and page chains and exit. */
6080 #ifdef BCE_JUMBO_HDRSPLIT
6081 	sc->pg_cons = sw_pg_cons;
6082 	bce_fill_pg_chain(sc);
6083 #endif
6084 
6085 	sc->rx_cons = sw_rx_cons;
6086 	bce_fill_rx_chain(sc);
6087 
	/* Prepare the RX and page chain pages to be accessed by the NIC. */
6089 	for (int i = 0; i < RX_PAGES; i++)
6090 		bus_dmamap_sync(sc->rx_bd_chain_tag,
6091 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6092 
6093 #ifdef BCE_JUMBO_HDRSPLIT
6094 	for (int i = 0; i < PG_PAGES; i++)
6095 		bus_dmamap_sync(sc->pg_bd_chain_tag,
6096 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6097 #endif
6098 
6099 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
6100 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
6101 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
6102 	DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
6103 }
6104 
6105 
6106 /****************************************************************************/
6107 /* Reads the transmit consumer value from the status block (skipping over   */
6108 /* chain page pointer if necessary).                                        */
6109 /*                                                                          */
6110 /* Returns:                                                                 */
6111 /*   hw_cons                                                                */
6112 /****************************************************************************/
6113 static inline u16
6114 bce_get_hw_tx_cons(struct bce_softc *sc)
6115 {
6116 	u16 hw_cons;
6117 
6118 	mb();
6119 	hw_cons = sc->status_block->status_tx_quick_consumer_index0;
6120 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6121 		hw_cons++;
6122 
6123 	return hw_cons;
6124 }
6125 
6126 
6127 /****************************************************************************/
6128 /* Handles transmit completion interrupt events.                            */
6129 /*                                                                          */
6130 /* Returns:                                                                 */
6131 /*   Nothing.                                                               */
6132 /****************************************************************************/
6133 static void
6134 bce_tx_intr(struct bce_softc *sc)
6135 {
6136 	struct ifnet *ifp = sc->bce_ifp;
6137 	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
6138 
6139 	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6140 	DBRUN(sc->tx_interrupts++);
6141 	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
6142 		"tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6143 		__FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6144 
6145 	BCE_LOCK_ASSERT(sc);
6146 
6147 	/* Get the hardware's view of the TX consumer index. */
6148 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6149 	sw_tx_cons = sc->tx_cons;
6150 
6151 	/* Prevent speculative reads from getting ahead of the status block. */
6152 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6153 		BUS_SPACE_BARRIER_READ);
6154 
6155 	/* Cycle through any completed TX chain page entries. */
6156 	while (sw_tx_cons != hw_tx_cons) {
6157 #ifdef BCE_DEBUG
6158 		struct tx_bd *txbd = NULL;
6159 #endif
6160 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
6161 
6162 		DBPRINT(sc, BCE_INFO_SEND,
6163 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
6164 			"sw_tx_chain_cons = 0x%04X\n",
6165 			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
6166 
6167 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
6168 			BCE_PRINTF("%s(%d): TX chain consumer out of range! "
6169 				" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
6170 				(int) MAX_TX_BD);
6171 			bce_breakpoint(sc));
6172 
6173 		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
6174 				[TX_IDX(sw_tx_chain_cons)]);
6175 
6176 		DBRUNIF((txbd == NULL),
6177 			BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
6178 				__FILE__, __LINE__, sw_tx_chain_cons);
6179 			bce_breakpoint(sc));
6180 
6181 		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
6182 			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
6183 
6184 		/*
6185 		 * Free the associated mbuf. Remember
6186 		 * that only the last tx_bd of a packet
6187 		 * has an mbuf pointer and DMA map.
6188 		 */
6189 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
6190 
6191 			/* Validate that this is the last tx_bd. */
6192 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
6193 				BCE_PRINTF("%s(%d): tx_bd END flag not set but "
6194 				"txmbuf != NULL!\n", __FILE__, __LINE__);
6195 				bce_breakpoint(sc));
6196 
6197 			DBRUNMSG(BCE_INFO_SEND,
6198 				BCE_PRINTF("%s(): Unloading map/freeing mbuf "
6199 					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
6200 
6201 			/* Unmap the mbuf. */
6202 			bus_dmamap_unload(sc->tx_mbuf_tag,
6203 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
6204 
6205 			/* Free the mbuf. */
6206 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
6207 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
6208 			DBRUN(sc->debug_tx_mbuf_alloc--);
6209 
6210 			ifp->if_opackets++;
6211 		}
6212 
6213 		sc->used_tx_bd--;
6214 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
6215 
6216 		/* Refresh hw_cons to see if there's new work. */
6217 		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6218 
6219 		/* Prevent speculative reads from getting ahead of the status block. */
6220 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6221 			BUS_SPACE_BARRIER_READ);
6222 	}
6223 
6224 	/* Clear the TX timeout timer. */
6225 	sc->watchdog_timer = 0;
6226 
6227 	/* Clear the TX chain full (OACTIVE) flag when descriptors are available. */
6228 	if (sc->used_tx_bd < sc->max_tx_bd) {
6229 		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
6230 			DBPRINT(sc, BCE_INFO_SEND,
6231 				"%s(): Open TX chain! %d/%d (used/total)\n",
6232 				__FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
6233 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6234 	}
6235 
6236 	sc->tx_cons = sw_tx_cons;
6237 
6238 	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
6239 		"tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6240 		__FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6241 	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6242 }
6243 
6244 
6245 /****************************************************************************/
6246 /* Disables interrupt generation.                                           */
6247 /*                                                                          */
6248 /* Returns:                                                                 */
6249 /*   Nothing.                                                               */
6250 /****************************************************************************/
6251 static void
6252 bce_disable_intr(struct bce_softc *sc)
6253 {
6254 	DBENTER(BCE_VERBOSE_INTR);
6255 
6256 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
6257 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
6258 
6259 	DBEXIT(BCE_VERBOSE_INTR);
6260 }
6261 
6262 
6263 /****************************************************************************/
6264 /* Enables interrupt generation.                                            */
6265 /*                                                                          */
6266 /* Returns:                                                                 */
6267 /*   Nothing.                                                               */
6268 /****************************************************************************/
6269 static void
6270 bce_enable_intr(struct bce_softc *sc, int coal_now)
6271 {
6272 	DBENTER(BCE_VERBOSE_INTR);
6273 
6274 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6275 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
6276 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
6277 
6278 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6279 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
6280 
6281 	/* Force an immediate interrupt (whether there is new data or not). */
6282 	if (coal_now)
6283 		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
6284 
6285 	DBEXIT(BCE_VERBOSE_INTR);
6286 }
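/*
 * Usage note (sketch): bce_init_locked() calls bce_enable_intr(sc, 1) so
 * the COAL_NOW host coalescing command forces an immediate interrupt and
 * any work already posted to the status block gets serviced, while
 * bce_intr() re-enables with bce_enable_intr(sc, 0) because the hardware
 * will raise the next interrupt on its own when new work arrives:
 *
 *	bce_enable_intr(sc, 1);		(end of bce_init_locked())
 *	bce_enable_intr(sc, 0);		(end of bce_intr())
 */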
6287 
6288 
6289 /****************************************************************************/
6290 /* Handles controller initialization.                                       */
6291 /*                                                                          */
6292 /* Returns:                                                                 */
6293 /*   Nothing.                                                               */
6294 /****************************************************************************/
6295 static void
6296 bce_init_locked(struct bce_softc *sc)
6297 {
6298 	struct ifnet *ifp;
6299 	u32 ether_mtu = 0;
6300 
6301 	DBENTER(BCE_VERBOSE_RESET);
6302 
6303 	BCE_LOCK_ASSERT(sc);
6304 
6305 	ifp = sc->bce_ifp;
6306 
6307 	/* Bail out if the interface is already running. */
6308 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6309 		goto bce_init_locked_exit;
6310 
6311 	bce_stop(sc);
6312 
6313 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
6314 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
6315 			__FILE__, __LINE__);
6316 		goto bce_init_locked_exit;
6317 	}
6318 
6319 	if (bce_chipinit(sc)) {
6320 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
6321 			__FILE__, __LINE__);
6322 		goto bce_init_locked_exit;
6323 	}
6324 
6325 	if (bce_blockinit(sc)) {
6326 		BCE_PRINTF("%s(%d): Block initialization failed!\n",
6327 			__FILE__, __LINE__);
6328 		goto bce_init_locked_exit;
6329 	}
6330 
6331 	/* Load our MAC address. */
6332 	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
6333 	bce_set_mac_addr(sc);
6334 
6335 	/*
6336 	 * Calculate and program the hardware Ethernet MTU
6337 	 * size. Be generous on the receive if we have room.
6338 	 */
6339 #ifdef BCE_JUMBO_HDRSPLIT
6340 	if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size))
6341 		ether_mtu = sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size;
6342 #else
6343 	if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
6344 		ether_mtu = sc->rx_bd_mbuf_data_len;
6345 #endif
6346 	else
6347 		ether_mtu = ifp->if_mtu;
6348 
6349 	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
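	/*
	 * Worked example: a standard 1500 byte MTU programs the hardware for
	 * 1500 + 14 (Ethernet header) + 4 (VLAN tag) + 4 (CRC) = 1522 bytes,
	 * which equals ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN and therefore
	 * does not enable jumbo frame support below.
	 */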
6350 
6351 	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
6352 		ether_mtu);
6353 
6354 	/* Program the mtu, enabling jumbo frame support if necessary. */
6355 	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
6356 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
6357 			min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
6358 			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
6359 	else
6360 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
6361 
6362 	DBPRINT(sc, BCE_INFO_LOAD,
6363 		"%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
6364 		"rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
6365 		sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
6366 		sc->rx_bd_mbuf_align_pad);
6367 
6368 	/* Program appropriate promiscuous/multicast filtering. */
6369 	bce_set_rx_mode(sc);
6370 
6371 #ifdef BCE_JUMBO_HDRSPLIT
6372 	DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
6373 		__FUNCTION__, sc->pg_bd_mbuf_alloc_size);
6374 
6375 	/* Init page buffer descriptor chain. */
6376 	bce_init_pg_chain(sc);
6377 #endif
6378 
6379 	/* Init RX buffer descriptor chain. */
6380 	bce_init_rx_chain(sc);
6381 
6382 	/* Init TX buffer descriptor chain. */
6383 	bce_init_tx_chain(sc);
6384 
6385 	/* Enable host interrupts. */
6386 	bce_enable_intr(sc, 1);
6387 
6388 	bce_ifmedia_upd_locked(ifp);
6389 
6390 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6391 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6392 
6393 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6394 
6395 bce_init_locked_exit:
6396 	DBEXIT(BCE_VERBOSE_RESET);
6397 }
6398 
6399 
6400 /****************************************************************************/
6401 /* Initialize the controller just enough so that any management firmware    */
6402 /* running on the device will continue to operate correctly.                */
6403 /*                                                                          */
6404 /* Returns:                                                                 */
6405 /*   Nothing.                                                               */
6406 /****************************************************************************/
6407 static void
6408 bce_mgmt_init_locked(struct bce_softc *sc)
6409 {
6410 	struct ifnet *ifp;
6411 
6412 	DBENTER(BCE_VERBOSE_RESET);
6413 
6414 	BCE_LOCK_ASSERT(sc);
6415 
6416 	/* Bail out if management firmware is not running. */
6417 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
6418 		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
6419 			"No management firmware running...\n");
6420 		goto bce_mgmt_init_locked_exit;
6421 	}
6422 
6423 	ifp = sc->bce_ifp;
6424 
6425 	/* Enable all critical blocks in the MAC. */
6426 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
6427 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
6428 	DELAY(20);
6429 
6430 	bce_ifmedia_upd_locked(ifp);
6431 
6432 bce_mgmt_init_locked_exit:
6433 	DBEXIT(BCE_VERBOSE_RESET);
6434 }
6435 
6436 
6437 /****************************************************************************/
6438 /* Handles controller initialization when called from an unlocked routine.  */
6439 /*                                                                          */
6440 /* Returns:                                                                 */
6441 /*   Nothing.                                                               */
6442 /****************************************************************************/
6443 static void
6444 bce_init(void *xsc)
6445 {
6446 	struct bce_softc *sc = xsc;
6447 
6448 	DBENTER(BCE_VERBOSE_RESET);
6449 
6450 	BCE_LOCK(sc);
6451 	bce_init_locked(sc);
6452 	BCE_UNLOCK(sc);
6453 
6454 	DBEXIT(BCE_VERBOSE_RESET);
6455 }
6456 
6457 
6458 /****************************************************************************/
6459 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
6460 /* the memory visible to the controller.                                    */
6461 /*                                                                          */
6462 /* Returns:                                                                 */
6463 /*   0 for success, positive value for failure.                             */
6464 /* Modified:                                                                */
6465 /*   m_head: May be set to NULL if MBUF is excessively fragmented.          */
6466 /****************************************************************************/
6467 static int
6468 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
6469 {
6470 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
6471 	bus_dmamap_t map;
6472 	struct tx_bd *txbd = NULL;
6473 	struct mbuf *m0;
6474 	struct ether_vlan_header *eh;
6475 	struct ip *ip;
6476 	struct tcphdr *th;
6477 	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
6478 	u32 prod_bseq;
6479 	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
6480 
6481 #ifdef BCE_DEBUG
6482 	u16 debug_prod;
6483 #endif
6484 	int i, error, nsegs, rc = 0;
6485 
6486 	DBENTER(BCE_VERBOSE_SEND);
6487 	DBPRINT(sc, BCE_INFO_SEND,
6488 		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = %04X, "
6489 		"tx_prod_bseq = 0x%08X\n",
6490 		__FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6491 		sc->tx_prod_bseq);
6492 
6493 	/* Transfer any checksum offload flags to the bd. */
6494 	m0 = *m_head;
6495 	if (m0->m_pkthdr.csum_flags) {
6496 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
6497 			flags |= TX_BD_FLAGS_IP_CKSUM;
6498 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
6499 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6500 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
6501 			/* For TSO the controller needs two pieces of info, */
6502 			/* the MSS and the IP+TCP options length.           */
6503 			mss = htole16(m0->m_pkthdr.tso_segsz);
6504 
6505 			/* Map the header and find the Ethernet type & header length */
6506 			eh = mtod(m0, struct ether_vlan_header *);
6507 			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
6508 				etype = ntohs(eh->evl_proto);
6509 				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6510 			} else {
6511 				etype = ntohs(eh->evl_encap_proto);
6512 				e_hlen = ETHER_HDR_LEN;
6513 			}
6514 
6515 			/* Check for supported TSO Ethernet types (only IPv4 for now) */
6516 			switch (etype) {
6517 				case ETHERTYPE_IP:
6518 					ip = (struct ip *)(m0->m_data + e_hlen);
6519 
6520 					/* TSO only supported for TCP protocol */
6521 					if (ip->ip_p != IPPROTO_TCP) {
6522 						BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n",
6523 							__FILE__, __LINE__);
6524 						goto bce_tx_encap_skip_tso;
6525 					}
6526 
6527 					/* Get IP header length in bytes (min 20) */
6528 					ip_hlen = ip->ip_hl << 2;
6529 
6530 					/* Get the TCP header length in bytes (min 20) */
6531 					th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
6532 					tcp_hlen = (th->th_off << 2);
6533 
6534 					/* IP header length and checksum will be calc'd by hardware */
6535 					ip_len = ip->ip_len;
6536 					ip->ip_len = 0;
6537 					ip->ip_sum = 0;
6538 					break;
6539 				case ETHERTYPE_IPV6:
6540 					BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n",
6541 						__FILE__, __LINE__);
6542 					goto bce_tx_encap_skip_tso;
6543 				default:
6544 					BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n",
6545 						__FILE__, __LINE__);
6546 					goto bce_tx_encap_skip_tso;
6547 			}
6548 
6549 			hdr_len = e_hlen + ip_hlen + tcp_hlen;
6550 
6551 			DBPRINT(sc, BCE_EXTREME_SEND,
6552 				"%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
6553 				 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
6554 
6555 			/* Set the LSO flag in the TX BD */
6556 			flags |= TX_BD_FLAGS_SW_LSO;
6557 			/* Set the length of IP + TCP options (in 32 bit words) */
6558 			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
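			/*
			 * Worked example: a 20 byte IP header plus a 32 byte
			 * TCP header (20 fixed + 12 bytes of options) yields
			 * (20 + 32 - 40) >> 2 = 3 option words, carried in
			 * bits 8 and up of the tx_bd flags.
			 */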
6559 
6560 bce_tx_encap_skip_tso:
6561 			DBRUN(sc->requested_tso_frames++);
6562 		}
6563 	}
6564 
6565 	/* Transfer any VLAN tags to the bd. */
6566 	if (m0->m_flags & M_VLANTAG) {
6567 		flags |= TX_BD_FLAGS_VLAN_TAG;
6568 		vlan_tag = m0->m_pkthdr.ether_vtag;
6569 	}
6570 
6571 	/* Map the mbuf into DMAable memory. */
6572 	prod = sc->tx_prod;
6573 	chain_prod = TX_CHAIN_IDX(prod);
6574 	map = sc->tx_mbuf_map[chain_prod];
6575 
6576 	/* Map the mbuf into our DMA address space. */
6577 	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6578 	    segs, &nsegs, BUS_DMA_NOWAIT);
6579 
6580 	/* Check if the DMA mapping was successful */
6581 	if (error == EFBIG) {
6582 
6583 		sc->fragmented_mbuf_count++;
6584 
6585 		/* Try to defrag the mbuf. */
6586 		m0 = m_defrag(*m_head, M_DONTWAIT);
6587 		if (m0 == NULL) {
6588 			/* Defrag was unsuccessful */
6589 			m_freem(*m_head);
6590 			*m_head = NULL;
6591 			sc->mbuf_alloc_failed_count++;
6592 			rc = ENOBUFS;
6593 			goto bce_tx_encap_exit;
6594 		}
6595 
6596 		/* Defrag was successful, try mapping again */
6597 		*m_head = m0;
6598 		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6599 		    segs, &nsegs, BUS_DMA_NOWAIT);
6600 
6601 		/* Still getting an error after a defrag. */
6602 		if (error == ENOMEM) {
6603 			/* Insufficient DMA buffers available. */
6604 			sc->dma_map_addr_tx_failed_count++;
6605 			rc = error;
6606 			goto bce_tx_encap_exit;
6607 		} else if (error != 0) {
6608 			/* Still can't map the mbuf, release it and return an error. */
6609 			BCE_PRINTF(
6610 			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
6611 			    __FILE__, __LINE__);
6612 			m_freem(m0);
6613 			*m_head = NULL;
6614 			sc->dma_map_addr_tx_failed_count++;
6615 			rc = ENOBUFS;
6616 			goto bce_tx_encap_exit;
6617 		}
6618 	} else if (error == ENOMEM) {
6619 		/* Insufficient DMA buffers available. */
6620 		sc->dma_map_addr_tx_failed_count++;
6621 		rc = error;
6622 		goto bce_tx_encap_exit;
6623 	} else if (error != 0) {
6624 		m_freem(m0);
6625 		*m_head = NULL;
6626 		sc->dma_map_addr_tx_failed_count++;
6627 		rc = error;
6628 		goto bce_tx_encap_exit;
6629 	}
6630 
6631 	/* Make sure there's room in the chain */
6632 	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
6633 		bus_dmamap_unload(sc->tx_mbuf_tag, map);
6634 		rc = ENOBUFS;
6635 		goto bce_tx_encap_exit;
6636 	}
6637 
6638 	/* prod points to an empty tx_bd at this point. */
6639 	prod_bseq  = sc->tx_prod_bseq;
6640 
6641 #ifdef BCE_DEBUG
6642 	debug_prod = chain_prod;
6643 #endif
6644 
6645 	DBPRINT(sc, BCE_INFO_SEND,
6646 		"%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
6647 		"prod_bseq = 0x%08X\n",
6648 		__FUNCTION__, prod, chain_prod, prod_bseq);
6649 
6650 	/*
6651 	 * Cycle through each mbuf segment that makes up
6652 	 * the outgoing frame, gathering the mapping info
6653 	 * for that segment and creating a tx_bd for
6654 	 * the mbuf.
6655 	 */
6656 	for (i = 0; i < nsegs ; i++) {
6657 
6658 		chain_prod = TX_CHAIN_IDX(prod);
6659 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
6660 
6661 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
6662 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
6663 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
6664 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
6665 		txbd->tx_bd_flags = htole16(flags);
6666 		prod_bseq += segs[i].ds_len;
6667 		if (i == 0)
6668 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
6669 		prod = NEXT_TX_BD(prod);
6670 	}
6671 
6672 	/* Set the END flag on the last TX buffer descriptor. */
6673 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
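	/*
	 * Layout sketch: a frame mapped into three DMA segments occupies
	 *
	 *	tx_bd[n]   : START flag set
	 *	tx_bd[n+1] : no START/END flag
	 *	tx_bd[n+2] : END flag set, tx_mbuf_ptr[] entry recorded below
	 *
	 * so bce_tx_intr() frees the mbuf (and unloads the single DMA map
	 * covering all segments) only when the last descriptor of the packet
	 * has completed.
	 */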
6674 
6675 	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
6676 
6677 	DBPRINT(sc, BCE_INFO_SEND,
6678 		"%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
6679 		"prod_bseq = 0x%08X\n",
6680 		__FUNCTION__, prod, chain_prod, prod_bseq);
6681 
6682 	/*
6683 	 * Ensure that the mbuf pointer for this transmission
6684 	 * is placed at the array index of the last
6685 	 * descriptor in this chain.  This is done
6686 	 * because a single map is used for all
6687 	 * segments of the mbuf and we don't want to
6688 	 * unload the map before all of the segments
6689 	 * have been freed.
6690 	 */
6691 	sc->tx_mbuf_ptr[chain_prod] = m0;
6692 	sc->used_tx_bd += nsegs;
6693 
6694 	/* Update some debug statistic counters */
6695 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
6696 		sc->tx_hi_watermark = sc->used_tx_bd);
6697 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
6698 	DBRUN(sc->debug_tx_mbuf_alloc++);
6699 
6700 	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
6701 
6702 	/* prod points to the next free tx_bd at this point. */
6703 	sc->tx_prod = prod;
6704 	sc->tx_prod_bseq = prod_bseq;
6705 
6706 	DBPRINT(sc, BCE_INFO_SEND,
6707 		"%s(exit): prod = 0x%04X, chain_prod = %04X, "
6708 		"prod_bseq = 0x%08X\n",
6709 		__FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6710 		sc->tx_prod_bseq);
6711 
6712 bce_tx_encap_exit:
6713 	DBEXIT(BCE_VERBOSE_SEND);
6714 	return(rc);
6715 }
6716 
6717 
6718 /****************************************************************************/
6719 /* Main transmit routine when called from another routine with a lock.      */
6720 /*                                                                          */
6721 /* Returns:                                                                 */
6722 /*   Nothing.                                                               */
6723 /****************************************************************************/
6724 static void
6725 bce_start_locked(struct ifnet *ifp)
6726 {
6727 	struct bce_softc *sc = ifp->if_softc;
6728 	struct mbuf *m_head = NULL;
6729 	int count = 0;
6730 	u16 tx_prod, tx_chain_prod;
6731 
6732 	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6733 
6734 	BCE_LOCK_ASSERT(sc);
6735 
6736 	/* prod points to the next free tx_bd. */
6737 	tx_prod = sc->tx_prod;
6738 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
6739 
6740 	DBPRINT(sc, BCE_INFO_SEND,
6741 		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
6742 		"tx_prod_bseq = 0x%08X\n",
6743 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
6744 
6745 	/* If there's no link or the transmit queue is empty then just exit. */
6746 	if (!sc->bce_link) {
6747 		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
6748 			__FUNCTION__);
6749 		goto bce_start_locked_exit;
6750 	}
6751 
6752 	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
6753 		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
6754 			__FUNCTION__);
6755 		goto bce_start_locked_exit;
6756 	}
6757 
6758 	/*
6759 	 * Keep adding entries while there is space in the ring.
6760 	 */
6761 	while (sc->used_tx_bd < sc->max_tx_bd) {
6762 
6763 		/* Check for any frames to send. */
6764 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
6765 
6766 		/* Stop when the transmit queue is empty. */
6767 		if (m_head == NULL)
6768 			break;
6769 
6770 		/*
6771 		 * Pack the data into the transmit ring. If we
6772 		 * don't have room, place the mbuf back at the
6773 		 * head of the queue and set the OACTIVE flag
6774 		 * to wait for the NIC to drain the chain.
6775 		 */
6776 		if (bce_tx_encap(sc, &m_head)) {
6777 			/* No room, put the frame back on the transmit queue. */
6778 			if (m_head != NULL)
6779 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
6780 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
6781 			DBPRINT(sc, BCE_INFO_SEND,
6782 				"TX chain is closed for business! Total tx_bd used = %d\n",
6783 				sc->used_tx_bd);
6784 			break;
6785 		}
6786 
6787 		count++;
6788 
6789 		/* Send a copy of the frame to any BPF listeners. */
6790 		ETHER_BPF_MTAP(ifp, m_head);
6791 	}
6792 
6793 	/* Exit if no packets were dequeued. */
6794 	if (count == 0) {
6795 		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
6796 			__FUNCTION__);
6797 		goto bce_start_locked_exit;
6798 	}
6799 
6800 	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into send queue.\n",
6801 		__FUNCTION__, count);
6802 
6803 	REG_WR(sc, BCE_MQ_COMMAND, REG_RD(sc, BCE_MQ_COMMAND) | BCE_MQ_COMMAND_NO_MAP_ERROR);
6804 
6805 	/* Write the mailbox and tell the chip about the waiting tx_bd's. */
6806 	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6807 		"BCE_L2MQ_TX_HOST_BIDX = 0x%08X, sc->tx_prod = 0x%04X\n",
6808 		__FUNCTION__,
6809 		MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6810 	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6811 	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6812 		"BCE_L2MQ_TX_HOST_BSEQ = 0x%08X, sc->tx_prod_bseq = 0x%04X\n",
6813 		__FUNCTION__,
6814 		MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6815 	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6816 
6817 	/* Set the tx timeout. */
6818 	sc->watchdog_timer = BCE_TX_TIMEOUT;
6819 
6820 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
6821 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));
6822 
6823 bce_start_locked_exit:
6824 	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6825 	return;
6826 }
6827 
6828 
6829 /****************************************************************************/
6830 /* Main transmit routine when called from another routine without a lock.   */
6831 /*                                                                          */
6832 /* Returns:                                                                 */
6833 /*   Nothing.                                                               */
6834 /****************************************************************************/
6835 static void
6836 bce_start(struct ifnet *ifp)
6837 {
6838 	struct bce_softc *sc = ifp->if_softc;
6839 
6840 	DBENTER(BCE_VERBOSE_SEND);
6841 
6842 	BCE_LOCK(sc);
6843 	bce_start_locked(ifp);
6844 	BCE_UNLOCK(sc);
6845 
6846 	DBEXIT(BCE_VERBOSE_SEND);
6847 }
6848 
6849 
6850 /****************************************************************************/
6851 /* Handles any IOCTL calls from the operating system.                       */
6852 /*                                                                          */
6853 /* Returns:                                                                 */
6854 /*   0 for success, positive value for failure.                             */
6855 /****************************************************************************/
6856 static int
6857 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
6858 {
6859 	struct bce_softc *sc = ifp->if_softc;
6860 	struct ifreq *ifr = (struct ifreq *) data;
6861 	struct mii_data *mii;
6862 	int mask, error = 0;
6863 
6864 	DBENTER(BCE_VERBOSE_MISC);
6865 
6866 	switch(command) {
6867 
6868 		/* Set the interface MTU. */
6869 		case SIOCSIFMTU:
6870 			/* Check that the MTU setting is supported. */
6871 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
6872 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
6873 				error = EINVAL;
6874 				break;
6875 			}
6876 
6877 			DBPRINT(sc, BCE_INFO_MISC,
6878 				"SIOCSIFMTU: Changing MTU from %d to %d\n",
6879 				(int) ifp->if_mtu, (int) ifr->ifr_mtu);
6880 
6881 			BCE_LOCK(sc);
6882 			ifp->if_mtu = ifr->ifr_mtu;
6883 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6884 #ifdef BCE_JUMBO_HDRSPLIT
6885 			/* No buffer allocation size changes are necessary. */
6886 #else
6887 			/* Recalculate our buffer allocation sizes. */
6888 			if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > MCLBYTES) {
6889 				sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
6890 				sc->rx_bd_mbuf_align_pad  = roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
6891 				sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
6892 					sc->rx_bd_mbuf_align_pad;
6893 			} else {
6894 				sc->rx_bd_mbuf_alloc_size = MCLBYTES;
6895 				sc->rx_bd_mbuf_align_pad  = roundup2(MCLBYTES, 16) - MCLBYTES;
6896 				sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
6897 					sc->rx_bd_mbuf_align_pad;
6898 			}
6899 #endif
6900 
6901 			bce_init_locked(sc);
6902 			BCE_UNLOCK(sc);
6903 			break;
6904 
6905 		/* Set interface flags. */
6906 		case SIOCSIFFLAGS:
6907 			DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
6908 
6909 			BCE_LOCK(sc);
6910 
6911 			/* Check if the interface is up. */
6912 			if (ifp->if_flags & IFF_UP) {
6913 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6914 					/* Change promiscuous/multicast flags as necessary. */
6915 					bce_set_rx_mode(sc);
6916 				} else {
6917 					/* Start the HW */
6918 					bce_init_locked(sc);
6919 				}
6920 			} else {
6921 				/* The interface is down, check if driver is running. */
6922 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6923 					bce_stop(sc);
6924 
6925 					/* If MFW is running, re-init the controller just enough to keep it alive. */
6926 					if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
6927 						bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
6928 						bce_chipinit(sc);
6929 						bce_mgmt_init_locked(sc);
6930 					}
6931 				}
6932 			}
6933 
6934 			BCE_UNLOCK(sc);
6935 			error = 0;
6936 
6937 			break;
6938 
6939 		/* Add/Delete multicast address */
6940 		case SIOCADDMULTI:
6941 		case SIOCDELMULTI:
6942 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
6943 
6944 			BCE_LOCK(sc);
6945 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6946 				bce_set_rx_mode(sc);
6947 				error = 0;
6948 			}
6949 			BCE_UNLOCK(sc);
6950 
6951 			break;
6952 
6953 		/* Set/Get Interface media */
6954 		case SIOCSIFMEDIA:
6955 		case SIOCGIFMEDIA:
6956 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
6957 
6958 			mii = device_get_softc(sc->bce_miibus);
6959 			error = ifmedia_ioctl(ifp, ifr,
6960 			    &mii->mii_media, command);
6961 			break;
6962 
6963 		/* Set interface capability */
6964 		case SIOCSIFCAP:
6965 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
6966 			DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
6967 
6968 			/* Toggle the TX checksum capabilities enable flag. */
6969 			if (mask & IFCAP_TXCSUM) {
6970 				ifp->if_capenable ^= IFCAP_TXCSUM;
6971 				if (IFCAP_TXCSUM & ifp->if_capenable)
6972 					ifp->if_hwassist = BCE_IF_HWASSIST;
6973 				else
6974 					ifp->if_hwassist = 0;
6975 			}
6976 
6977 			/* Toggle the RX checksum capabilities enable flag. */
6978 			if (mask & IFCAP_RXCSUM) {
6979 				ifp->if_capenable ^= IFCAP_RXCSUM;
6980 				if (IFCAP_RXCSUM & ifp->if_capenable)
6981 					ifp->if_hwassist = BCE_IF_HWASSIST;
6982 				else
6983 					ifp->if_hwassist = 0;
6984 			}
6985 
6986 			/* Toggle the TSO capabilities enable flag. */
6987 			if (bce_tso_enable && (mask & IFCAP_TSO4)) {
6988 				ifp->if_capenable ^= IFCAP_TSO4;
6989 				if (IFCAP_TSO4 & ifp->if_capenable)
6990 					ifp->if_hwassist = BCE_IF_HWASSIST;
6991 				else
6992 					ifp->if_hwassist = 0;
6993 			}
6994 
6995 			/* Toggle VLAN_MTU capabilities enable flag. */
6996 			if (mask & IFCAP_VLAN_MTU) {
6997 				BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
6998 					__FILE__, __LINE__);
6999 			}
7000 
7001 			/* Toggle the VLAN_HWTAGGING capabilities enable flag. */
7002 			if (mask & IFCAP_VLAN_HWTAGGING) {
7003 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
7004 					BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
7005 						"management firmware (ASF/IPMI/UMP) is running!\n",
7006 						__FILE__, __LINE__);
7007 				else
7008 					BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
7009 						__FILE__, __LINE__);
7010 			}
7011 
7012 			break;
7013 		default:
7014 			/* We don't know how to handle the IOCTL, pass it on. */
7015 			error = ether_ioctl(ifp, command, data);
7016 			break;
7017 	}
7018 
7019 	DBEXIT(BCE_VERBOSE_MISC);
7020 	return(error);
7021 }
7022 
7023 
7024 /****************************************************************************/
7025 /* Transmit timeout handler.                                                */
7026 /*                                                                          */
7027 /* Returns:                                                                 */
7028 /*   Nothing.                                                               */
7029 /****************************************************************************/
7030 static void
7031 bce_watchdog(struct bce_softc *sc)
7032 {
7033 	DBENTER(BCE_EXTREME_SEND);
7034 
7035 	BCE_LOCK_ASSERT(sc);
7036 
7037 	/* If the watchdog timer hasn't expired then just exit. */
7038 	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
7039 		goto bce_watchdog_exit;
7040 
7041 	/* If pause frames are active then don't reset the hardware. */
7042 	/* ToDo: Should we reset the timer here? */
7043 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
7044 		goto bce_watchdog_exit;
7045 
7046 	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
7047 		__FILE__, __LINE__);
7048 
7049 	DBRUNMSG(BCE_INFO,
7050 		bce_dump_driver_state(sc);
7051 		bce_dump_status_block(sc);
7052 		bce_dump_stats_block(sc);
7053 		bce_dump_ftqs(sc);
7054 		bce_dump_txp_state(sc, 0);
7055 		bce_dump_rxp_state(sc, 0);
7056 		bce_dump_tpat_state(sc, 0);
7057 		bce_dump_cp_state(sc, 0);
7058 		bce_dump_com_state(sc, 0));
7059 
7060 	DBRUN(bce_breakpoint(sc));
7061 
7062 	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
7063 
7064 	bce_init_locked(sc);
7065 	sc->bce_ifp->if_oerrors++;
7066 
7067 bce_watchdog_exit:
7068 	DBEXIT(BCE_EXTREME_SEND);
7069 }
7070 
7071 
7075 /****************************************************************************/
7076 /* Main interrupt entry point.  Verifies that the controller generated the  */
7077 /* interrupt and then calls a separate routine for handle the various       */
7078 /* interrupt and then calls a separate routine to handle the various        */
7079 /*                                                                          */
7080 /* Returns:                                                                 */
7081 /*   0 for success, positive value for failure.                             */
7082 /****************************************************************************/
7083 static void
7084 bce_intr(void *xsc)
7085 {
7086 	struct bce_softc *sc;
7087 	struct ifnet *ifp;
7088 	u32 status_attn_bits;
7089 	u16 hw_rx_cons, hw_tx_cons;
7090 
7091 	sc = xsc;
7092 	ifp = sc->bce_ifp;
7093 
7094 	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7095 	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
7096 
7097 	BCE_LOCK(sc);
7098 
7099 	DBRUN(sc->interrupts_generated++);
7100 
7101 	/* Synchnorize before we read from interface's status block */
7102 	/* Synchronize before we read from the interface's status block. */
7103 	    BUS_DMASYNC_POSTREAD);
7104 
7105 	/*
7106 	 * If the hardware status block index
7107 	 * matches the last value read by the
7108 	 * driver and we haven't asserted our
7109 	 * interrupt then there's nothing to do.
7110 	 */
7111 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
7112 		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
7113 			DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
7114 				__FUNCTION__);
7115 			goto bce_intr_exit;
7116 	}
7117 
7118 	/* Ack the interrupt and stop others from occurring. */
7119 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
7120 		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
7121 		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
7122 
7123 	/* Check if the hardware has finished any work. */
7124 	hw_rx_cons = bce_get_hw_rx_cons(sc);
7125 	hw_tx_cons = bce_get_hw_tx_cons(sc);
7126 
7127 	/* Keep processing data as long as there is work to do. */
7128 	for (;;) {
7129 
7130 		status_attn_bits = sc->status_block->status_attn_bits;
7131 
7132 		DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
7133 			BCE_PRINTF("Simulating unexpected status attention bit set.");
7134 			sc->unexpected_attention_sim_count++;
7135 			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
7136 
7137 		/* Was it a link change interrupt? */
7138 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
7139 			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
7140 			bce_phy_intr(sc);
7141 
7142 			/* Clear any transient status updates during link state change. */
7143 			REG_WR(sc, BCE_HC_COMMAND,
7144 				sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
7145 			REG_RD(sc, BCE_HC_COMMAND);
7146 		}
7147 
7148 		/* If any other attention is asserted then the chip is toast. */
7149 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
7150 			(sc->status_block->status_attn_bits_ack &
7151 			~STATUS_ATTN_BITS_LINK_STATE))) {
7152 
7153 			sc->unexpected_attention_count++;
7154 
7155 			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
7156 				__FILE__, __LINE__, sc->status_block->status_attn_bits);
7157 
7158 			DBRUNMSG(BCE_FATAL,
7159 				if (unexpected_attention_sim_control == 0)
7160 					bce_breakpoint(sc));
7161 
7162 			bce_init_locked(sc);
7163 			goto bce_intr_exit;
7164 		}
7165 
7166 		/* Check for any completed RX frames. */
7167 		if (hw_rx_cons != sc->hw_rx_cons)
7168 			bce_rx_intr(sc);
7169 
7170 		/* Check for any completed TX frames. */
7171 		if (hw_tx_cons != sc->hw_tx_cons)
7172 			bce_tx_intr(sc);
7173 
7174 		/* Save the status block index value for use during the next interrupt. */
7175 		sc->last_status_idx = sc->status_block->status_idx;
7176 
7177 		/* Prevent speculative reads from getting ahead of the status block. */
7178 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
7179 			BUS_SPACE_BARRIER_READ);
7180 
7181 		/* If there's no work left then exit the interrupt service routine. */
7182 		hw_rx_cons = bce_get_hw_rx_cons(sc);
7183 		hw_tx_cons = bce_get_hw_tx_cons(sc);
7184 
7185 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
7186 			break;
7187 
7188 	}
7189 
7190 	bus_dmamap_sync(sc->status_tag,	sc->status_map,
7191 	    BUS_DMASYNC_PREREAD);
7192 
7193 	/* Re-enable interrupts. */
7194 	bce_enable_intr(sc, 0);
7195 
7196 	/* Handle any frames that arrived while handling the interrupt. */
7197 	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
7198 		bce_start_locked(ifp);
7199 
7200 bce_intr_exit:
7201 	BCE_UNLOCK(sc);
7202 
7203 	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7204 }
7205 
7206 
7207 /****************************************************************************/
7208 /* Programs the various packet receive modes (broadcast and multicast).     */
7209 /*                                                                          */
7210 /* Returns:                                                                 */
7211 /*   Nothing.                                                               */
7212 /****************************************************************************/
7213 static void
7214 bce_set_rx_mode(struct bce_softc *sc)
7215 {
7216 	struct ifnet *ifp;
7217 	struct ifmultiaddr *ifma;
7218 	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
7219 	u32 rx_mode, sort_mode;
7220 	int h, i;
7221 
7222 	DBENTER(BCE_VERBOSE_MISC);
7223 
7224 	BCE_LOCK_ASSERT(sc);
7225 
7226 	ifp = sc->bce_ifp;
7227 
7228 	/* Initialize receive mode default settings. */
7229 	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
7230 			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
7231 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
7232 
7233 	/*
7234 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
7235 	 * be enabled.
7236 	 */
7237 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
7238 		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
7239 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
7240 
7241 	/*
7242 	 * Check for promiscuous, all multicast, or selected
7243 	 * multicast address filtering.
7244 	 */
7245 	if (ifp->if_flags & IFF_PROMISC) {
7246 		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
7247 
7248 		/* Enable promiscuous mode. */
7249 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
7250 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
7251 	} else if (ifp->if_flags & IFF_ALLMULTI) {
7252 		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
7253 
7254 		/* Enable all multicast addresses. */
7255 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
7256 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
7257 		}
7258 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
7259 	} else {
7260 		/* Accept one or more multicast(s). */
7261 		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
7262 
7263 		if_maddr_rlock(ifp);
7264 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
7265 			if (ifma->ifma_addr->sa_family != AF_LINK)
7266 				continue;
7267 			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
7268 			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
7269 			    hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
7270 		}
7271 		if_maddr_runlock(ifp);
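		/*
		 * Hash layout example: h is the low byte of the little-endian
		 * CRC32 of the MAC address, so h = 0xA7 selects hash register
		 * (0xA7 & 0xE0) >> 5 = 5 and bit 0xA7 & 0x1F = 7 within that
		 * register, written out below.
		 */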
7272 
7273 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
7274 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
7275 
7276 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
7277 	}
7278 
7279 	/* Only make changes if the receive mode has actually changed. */
7280 	if (rx_mode != sc->rx_mode) {
7281 		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
7282 			rx_mode);
7283 
7284 		sc->rx_mode = rx_mode;
7285 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
7286 	}
7287 
7288 	/* Disable and clear the existing sort before enabling a new sort. */
7289 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
7290 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
7291 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
7292 
7293 	DBEXIT(BCE_VERBOSE_MISC);
7294 }
7295 
7296 
7297 /****************************************************************************/
7298 /* Called periodically to update statistics from the controller's           */
7299 /* statistics block.                                                        */
7300 /*                                                                          */
7301 /* Returns:                                                                 */
7302 /*   Nothing.                                                               */
7303 /****************************************************************************/
7304 static void
7305 bce_stats_update(struct bce_softc *sc)
7306 {
7307 	struct ifnet *ifp;
7308 	struct statistics_block *stats;
7309 
7310 	DBENTER(BCE_EXTREME_MISC);
7311 
7312 	ifp = sc->bce_ifp;
7313 
7314 	stats = (struct statistics_block *) sc->stats_block;
7315 
7316 	/*
7317 	 * Certain controllers don't report
7318 	 * carrier sense errors correctly.
7319 	 * See errata E11_5708CA0_1165.
7320 	 */
7321 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
7322 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
7323 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
7324 
7325 	/*
7326 	 * Update the sysctl statistics from the
7327 	 * hardware statistics.
7328 	 */
7329 	sc->stat_IfHCInOctets =
7330 		((u64) stats->stat_IfHCInOctets_hi << 32) +
7331 		 (u64) stats->stat_IfHCInOctets_lo;
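	/*
	 * Worked example: the hardware keeps these 64 bit counters as two
	 * 32 bit halves, so hi = 0x00000001 and lo = 0x00000002 combine to
	 * 0x0000000100000002 (4,294,967,298) octets.
	 */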
7332 
7333 	sc->stat_IfHCInBadOctets =
7334 		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
7335 		 (u64) stats->stat_IfHCInBadOctets_lo;
7336 
7337 	sc->stat_IfHCOutOctets =
7338 		((u64) stats->stat_IfHCOutOctets_hi << 32) +
7339 		 (u64) stats->stat_IfHCOutOctets_lo;
7340 
7341 	sc->stat_IfHCOutBadOctets =
7342 		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
7343 		 (u64) stats->stat_IfHCOutBadOctets_lo;
7344 
7345 	sc->stat_IfHCInUcastPkts =
7346 		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
7347 		 (u64) stats->stat_IfHCInUcastPkts_lo;
7348 
7349 	sc->stat_IfHCInMulticastPkts =
7350 		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
7351 		 (u64) stats->stat_IfHCInMulticastPkts_lo;
7352 
7353 	sc->stat_IfHCInBroadcastPkts =
7354 		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
7355 		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
7356 
7357 	sc->stat_IfHCOutUcastPkts =
7358 		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
7359 		 (u64) stats->stat_IfHCOutUcastPkts_lo;
7360 
7361 	sc->stat_IfHCOutMulticastPkts =
7362 		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
7363 		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
7364 
7365 	sc->stat_IfHCOutBroadcastPkts =
7366 		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
7367 		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
7368 
7369 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
7370 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
7371 
7372 	sc->stat_Dot3StatsCarrierSenseErrors =
7373 		stats->stat_Dot3StatsCarrierSenseErrors;
7374 
7375 	sc->stat_Dot3StatsFCSErrors =
7376 		stats->stat_Dot3StatsFCSErrors;
7377 
7378 	sc->stat_Dot3StatsAlignmentErrors =
7379 		stats->stat_Dot3StatsAlignmentErrors;
7380 
7381 	sc->stat_Dot3StatsSingleCollisionFrames =
7382 		stats->stat_Dot3StatsSingleCollisionFrames;
7383 
7384 	sc->stat_Dot3StatsMultipleCollisionFrames =
7385 		stats->stat_Dot3StatsMultipleCollisionFrames;
7386 
7387 	sc->stat_Dot3StatsDeferredTransmissions =
7388 		stats->stat_Dot3StatsDeferredTransmissions;
7389 
7390 	sc->stat_Dot3StatsExcessiveCollisions =
7391 		stats->stat_Dot3StatsExcessiveCollisions;
7392 
7393 	sc->stat_Dot3StatsLateCollisions =
7394 		stats->stat_Dot3StatsLateCollisions;
7395 
7396 	sc->stat_EtherStatsCollisions =
7397 		stats->stat_EtherStatsCollisions;
7398 
7399 	sc->stat_EtherStatsFragments =
7400 		stats->stat_EtherStatsFragments;
7401 
7402 	sc->stat_EtherStatsJabbers =
7403 		stats->stat_EtherStatsJabbers;
7404 
7405 	sc->stat_EtherStatsUndersizePkts =
7406 		stats->stat_EtherStatsUndersizePkts;
7407 
7408 	sc->stat_EtherStatsOversizePkts =
7409 		stats->stat_EtherStatsOversizePkts;
7410 
7411 	sc->stat_EtherStatsPktsRx64Octets =
7412 		stats->stat_EtherStatsPktsRx64Octets;
7413 
7414 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
7415 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
7416 
7417 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
7418 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
7419 
7420 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
7421 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
7422 
7423 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
7424 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
7425 
7426 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
7427 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
7428 
7429 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
7430 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
7431 
7432 	sc->stat_EtherStatsPktsTx64Octets =
7433 		stats->stat_EtherStatsPktsTx64Octets;
7434 
7435 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
7436 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
7437 
7438 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
7439 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
7440 
7441 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
7442 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
7443 
7444 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
7445 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
7446 
7447 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
7448 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
7449 
7450 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
7451 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
7452 
7453 	sc->stat_XonPauseFramesReceived =
7454 		stats->stat_XonPauseFramesReceived;
7455 
7456 	sc->stat_XoffPauseFramesReceived =
7457 		stats->stat_XoffPauseFramesReceived;
7458 
7459 	sc->stat_OutXonSent =
7460 		stats->stat_OutXonSent;
7461 
7462 	sc->stat_OutXoffSent =
7463 		stats->stat_OutXoffSent;
7464 
7465 	sc->stat_FlowControlDone =
7466 		stats->stat_FlowControlDone;
7467 
7468 	sc->stat_MacControlFramesReceived =
7469 		stats->stat_MacControlFramesReceived;
7470 
7471 	sc->stat_XoffStateEntered =
7472 		stats->stat_XoffStateEntered;
7473 
7474 	sc->stat_IfInFramesL2FilterDiscards =
7475 		stats->stat_IfInFramesL2FilterDiscards;
7476 
7477 	sc->stat_IfInRuleCheckerDiscards =
7478 		stats->stat_IfInRuleCheckerDiscards;
7479 
7480 	sc->stat_IfInFTQDiscards =
7481 		stats->stat_IfInFTQDiscards;
7482 
7483 	sc->stat_IfInMBUFDiscards =
7484 		stats->stat_IfInMBUFDiscards;
7485 
7486 	sc->stat_IfInRuleCheckerP4Hit =
7487 		stats->stat_IfInRuleCheckerP4Hit;
7488 
7489 	sc->stat_CatchupInRuleCheckerDiscards =
7490 		stats->stat_CatchupInRuleCheckerDiscards;
7491 
7492 	sc->stat_CatchupInFTQDiscards =
7493 		stats->stat_CatchupInFTQDiscards;
7494 
7495 	sc->stat_CatchupInMBUFDiscards =
7496 		stats->stat_CatchupInMBUFDiscards;
7497 
7498 	sc->stat_CatchupInRuleCheckerP4Hit =
7499 		stats->stat_CatchupInRuleCheckerP4Hit;
7500 
7501 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
7502 
7503 	/*
7504 	 * Update the interface statistics from the
7505 	 * hardware statistics.
7506 	 */
7507 	ifp->if_collisions =
7508 		(u_long) sc->stat_EtherStatsCollisions;
7509 
7510 	/* ToDo: This method loses soft errors. */
7511 	ifp->if_ierrors =
7512 		(u_long) sc->stat_EtherStatsUndersizePkts +
7513 		(u_long) sc->stat_EtherStatsOversizePkts +
7514 		(u_long) sc->stat_IfInMBUFDiscards +
7515 		(u_long) sc->stat_Dot3StatsAlignmentErrors +
7516 		(u_long) sc->stat_Dot3StatsFCSErrors +
7517 		(u_long) sc->stat_IfInRuleCheckerDiscards +
7518 		(u_long) sc->stat_IfInFTQDiscards +
7519 		(u_long) sc->com_no_buffers;
7520 
7521 	/* ToDo: This method loses soft errors. */
7522 	ifp->if_oerrors =
7523 		(u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
7524 		(u_long) sc->stat_Dot3StatsExcessiveCollisions +
7525 		(u_long) sc->stat_Dot3StatsLateCollisions;
7526 
7527 	/* ToDo: Add additional statistics. */
7528 
7529 	DBEXIT(BCE_EXTREME_MISC);
7530 }
7531 
7532 
7533 /****************************************************************************/
7534 /* Periodic function to notify the bootcode that the driver is still        */
7535 /* present.                                                                 */
7536 /*                                                                          */
7537 /* Returns:                                                                 */
7538 /*   Nothing.                                                               */
7539 /****************************************************************************/
7540 static void
7541 bce_pulse(void *xsc)
7542 {
7543 	struct bce_softc *sc = xsc;
7544 	u32 msg;
7545 
7546 	DBENTER(BCE_EXTREME_MISC);
7547 
7548 	BCE_LOCK_ASSERT(sc);
7549 
7550 	/* Tell the firmware that the driver is still running. */
7551 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
7552 	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
7553 
7554 	/* Schedule the next pulse. */
7555 	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
7556 
7557 	DBEXIT(BCE_EXTREME_MISC);
7558 }
7559 
7560 
7561 /****************************************************************************/
7562 /* Periodic function to perform maintenance tasks.                          */
7563 /*                                                                          */
7564 /* Returns:                                                                 */
7565 /*   Nothing.                                                               */
7566 /****************************************************************************/
7567 static void
7568 bce_tick(void *xsc)
7569 {
7570 	struct bce_softc *sc = xsc;
7571 	struct mii_data *mii;
7572 	struct ifnet *ifp;
7573 
7574 	ifp = sc->bce_ifp;
7575 
7576 	DBENTER(BCE_EXTREME_MISC);
7577 
7578 	BCE_LOCK_ASSERT(sc);
7579 
7580 	/* Schedule the next tick. */
7581 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
7582 
7583 	/* Update the statistics from the hardware statistics block. */
7584 	bce_stats_update(sc);
7585 
7586 	/* Top off the receive and page chains. */
7587 #ifdef BCE_JUMBO_HDRSPLIT
7588 	bce_fill_pg_chain(sc);
7589 #endif
7590 	bce_fill_rx_chain(sc);
7591 
7592 	/* Check that the chip hasn't hung. */
7593 	bce_watchdog(sc);
7594 
7595 	/* If the link is already up then we're done. */
7596 	if (sc->bce_link)
7597 		goto bce_tick_exit;
7598 
7599 	/* Link is down.  Check what the PHY's doing. */
7600 	mii = device_get_softc(sc->bce_miibus);
7601 	mii_tick(mii);
7602 
7603 	/* Check if the link has come up. */
7604 	if ((mii->mii_media_status & IFM_ACTIVE) &&
7605 	    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
7606 		DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n", __FUNCTION__);
7607 		sc->bce_link++;
7608 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
7609 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
7610 		    bootverbose)
7611 			BCE_PRINTF("Gigabit link up!\n");
7612 		/* Now that link is up, handle any outstanding TX traffic. */
7613 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7614 			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found pending TX traffic.\n",
7615 				 __FUNCTION__);
7616 			bce_start_locked(ifp);
7617 		}
7618 	}
7619 
7620 bce_tick_exit:
7621 	DBEXIT(BCE_EXTREME_MISC);
7622 	return;
7623 }
7624 
7625 
7626 #ifdef BCE_DEBUG
7627 /****************************************************************************/
7628 /* Allows the driver state to be dumped through the sysctl interface.       */
7629 /*                                                                          */
7630 /* Returns:                                                                 */
7631 /*   0 for success, positive value for failure.                             */
7632 /****************************************************************************/
7633 static int
7634 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
7635 {
7636         int error;
7637         int result;
7638         struct bce_softc *sc;
7639 
7640         result = -1;
7641         error = sysctl_handle_int(oidp, &result, 0, req);
7642 
7643         if (error || !req->newptr)
7644                 return (error);
7645 
7646         if (result == 1) {
7647                 sc = (struct bce_softc *)arg1;
7648                 bce_dump_driver_state(sc);
7649         }
7650 
7651         return error;
7652 }
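/*
 * Usage sketch (BCE_DEBUG kernels only): writing 1 to the corresponding
 * sysctl node triggers the dump.  Assuming the node is attached as
 * dev.bce.<unit>.driver_state by bce_add_sysctls(), this would be:
 *
 *	sysctl dev.bce.0.driver_state=1
 *
 * The same write-1-to-dump convention applies to the hw_state, bc_state
 * and chain dump handlers below.
 */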
7653 
7654 
7655 /****************************************************************************/
7656 /* Allows the hardware state to be dumped through the sysctl interface.     */
7657 /*                                                                          */
7658 /* Returns:                                                                 */
7659 /*   0 for success, positive value for failure.                             */
7660 /****************************************************************************/
7661 static int
7662 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
7663 {
7664         int error;
7665         int result;
7666         struct bce_softc *sc;
7667 
7668         result = -1;
7669         error = sysctl_handle_int(oidp, &result, 0, req);
7670 
7671         if (error || !req->newptr)
7672                 return (error);
7673 
7674         if (result == 1) {
7675                 sc = (struct bce_softc *)arg1;
7676                 bce_dump_hw_state(sc);
7677         }
7678 
7679         return error;
7680 }
7681 
7682 
7683 /****************************************************************************/
7684 /* Allows the bootcode state to be dumped through the sysctl interface.     */
7685 /*                                                                          */
7686 /* Returns:                                                                 */
7687 /*   0 for success, positive value for failure.                             */
7688 /****************************************************************************/
7689 static int
7690 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
7691 {
7692         int error;
7693         int result;
7694         struct bce_softc *sc;
7695 
7696         result = -1;
7697         error = sysctl_handle_int(oidp, &result, 0, req);
7698 
7699         if (error || !req->newptr)
7700                 return (error);
7701 
7702         if (result == 1) {
7703                 sc = (struct bce_softc *)arg1;
7704                 bce_dump_bc_state(sc);
7705         }
7706 
7707         return error;
7708 }
7709 
7710 
7711 /****************************************************************************/
7712 /* Provides a sysctl interface to allow dumping the RX chain.               */
7713 /*                                                                          */
7714 /* Returns:                                                                 */
7715 /*   0 for success, positive value for failure.                             */
7716 /****************************************************************************/
7717 static int
7718 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
7719 {
7720         int error;
7721         int result;
7722         struct bce_softc *sc;
7723 
7724         result = -1;
7725         error = sysctl_handle_int(oidp, &result, 0, req);
7726 
7727         if (error || !req->newptr)
7728                 return (error);
7729 
7730         if (result == 1) {
7731                 sc = (struct bce_softc *)arg1;
7732                 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
7733         }
7734 
7735         return error;
7736 }
7737 
7738 
7739 /****************************************************************************/
7740 /* Provides a sysctl interface to allow dumping the TX chain.               */
7741 /*                                                                          */
7742 /* Returns:                                                                 */
7743 /*   0 for success, positive value for failure.                             */
7744 /****************************************************************************/
7745 static int
7746 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
7747 {
7748         int error;
7749         int result;
7750         struct bce_softc *sc;
7751 
7752         result = -1;
7753         error = sysctl_handle_int(oidp, &result, 0, req);
7754 
7755         if (error || !req->newptr)
7756                 return (error);
7757 
7758         if (result == 1) {
7759                 sc = (struct bce_softc *)arg1;
7760                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
7761         }
7762 
7763         return (error);
7764 }
7765 
7766 
7767 #ifdef BCE_JUMBO_HDRSPLIT
7768 /****************************************************************************/
7769 /* Provides a sysctl interface to allow dumping the page chain.             */
7770 /*                                                                          */
7771 /* Returns:                                                                 */
7772 /*   0 for success, positive value for failure.                             */
7773 /****************************************************************************/
7774 static int
7775 bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
7776 {
7777         int error;
7778         int result;
7779         struct bce_softc *sc;
7780 
7781         result = -1;
7782         error = sysctl_handle_int(oidp, &result, 0, req);
7783 
7784         if (error || !req->newptr)
7785                 return (error);
7786 
7787         if (result == 1) {
7788                 sc = (struct bce_softc *)arg1;
7789                 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
7790         }
7791 
7792         return (error);
7793 }
7794 #endif
7795 
7796 /****************************************************************************/
7797 /* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in  */
7798 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
7799 /*                                                                          */
7800 /* Returns:                                                                 */
7801 /*   0 for success, positive value for failure.                             */
7802 /****************************************************************************/
7803 static int
7804 bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
7805 {
7806 	struct bce_softc *sc = (struct bce_softc *)arg1;
7807 	int error;
7808 	u32 result;
7809 	u32 val[1];
7810 	u8 *data = (u8 *) val;
7811 
7812 	result = -1;
7813 	error = sysctl_handle_int(oidp, &result, 0, req);
7814 	if (error || (req->newptr == NULL))
7815 		return (error);
7816 
7817 	bce_nvram_read(sc, result, data, 4);
7818 	BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
7819 
7820 	return (error);
7821 }
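
/*
 * Usage sketch (assumption: unit 0): the value written to the node is taken
 * as a byte offset into NVRAM; a 32-bit word is read at that offset and
 * printed (byte-swapped through bce_be32toh()), e.g.
 * "sysctl dev.bce.0.nvram_read=0" dumps the first word.
 */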
7822 
7823 
7824 /****************************************************************************/
7825 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
7826 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
7827 /*                                                                          */
7828 /* Returns:                                                                 */
7829 /*   0 for success, positive value for failure.                             */
7830 /****************************************************************************/
7831 static int
7832 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
7833 {
7834 	struct bce_softc *sc = (struct bce_softc *)arg1;
7835 	int error;
7836 	u32 val, result;
7837 
7838 	result = -1;
7839 	error = sysctl_handle_int(oidp, &result, 0, req);
7840 	if (error || (req->newptr == NULL))
7841 		return (error);
7842 
7843 	/* Make sure the register is accessible. */
7844 	if (result < 0x8000) {
7845 		val = REG_RD(sc, result);
7846 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7847 	} else if (result < 0x0280000) {
7848 		val = REG_RD_IND(sc, result);
7849 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7850 	}
7851 
7852 	return (error);
7853 }
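
/*
 * Note: offsets below 0x8000 are read directly through the memory-mapped
 * register window (REG_RD); offsets from 0x8000 up to 0x280000 use the
 * indirect register access path (REG_RD_IND).  Offsets outside both ranges
 * are silently ignored.
 */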
7854 
7855 
7856 /****************************************************************************/
7857 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
7858 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
7859 /*                                                                          */
7860 /* Returns:                                                                 */
7861 /*   0 for success, positive value for failure.                             */
7862 /****************************************************************************/
7863 static int
7864 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
7865 {
7866 	struct bce_softc *sc;
7867 	device_t dev;
7868 	int error, result;
7869 	u16 val;
7870 
7871 	result = -1;
7872 	error = sysctl_handle_int(oidp, &result, 0, req);
7873 	if (error || (req->newptr == NULL))
7874 		return (error);
7875 
7876 	/* Make sure the register is accessible. */
7877 	if (result < 0x20) {
7878 		sc = (struct bce_softc *)arg1;
7879 		dev = sc->bce_dev;
7880 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
7881 		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
7882 	}
7883 	return (error);
7884 }
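
/*
 * Note: MII register addresses are limited to 0x00-0x1F, which is why the
 * handler only issues the read when the supplied value is below 0x20.
 */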
7885 
7886 
7887 /****************************************************************************/
7888 /* Provides a sysctl interface to allow reading a CID.                      */
7889 /*                                                                          */
7890 /* Returns:                                                                 */
7891 /*   0 for success, positive value for failure.                             */
7892 /****************************************************************************/
7893 static int
7894 bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
7895 {
7896 	struct bce_softc *sc;
7897 	int error;
7898 	u16 result;
7899 
7900 	result = -1;
7901 	error = sysctl_handle_int(oidp, &result, 0, req);
7902 	if (error || (req->newptr == NULL))
7903 		return (error);
7904 
7905 	/* Make sure the register is accessible. */
7906 	if (result <= TX_CID) {
7907 		sc = (struct bce_softc *)arg1;
7908 		bce_dump_ctx(sc, result);
7909 	}
7910 
7911 	return (error);
7912 }
7913 
7914 
7915 /****************************************************************************/
7916 /* Provides a sysctl interface to force the driver to dump state and        */
7917 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
7918 /*                                                                          */
7919 /* Returns:                                                                 */
7920 /*   0 for success, positive value for failure.                             */
7921 /****************************************************************************/
7922 static int
7923 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
7924 {
7925         int error;
7926         int result;
7927         struct bce_softc *sc;
7928 
7929         result = -1;
7930         error = sysctl_handle_int(oidp, &result, 0, req);
7931 
7932         if (error || !req->newptr)
7933                 return (error);
7934 
7935         if (result == 1) {
7936                 sc = (struct bce_softc *)arg1;
7937                 bce_breakpoint(sc);
7938         }
7939 
7940         return (error);
7941 }
7942 #endif
7943 
7944 
7945 /****************************************************************************/
7946 /* Adds any sysctl parameters for tuning or debugging purposes.             */
7947 /*                                                                          */
7948 /* Returns:                                                                 */
7949 /*   0 for success, positive value for failure.                             */
7950 /****************************************************************************/
7951 static void
7952 bce_add_sysctls(struct bce_softc *sc)
7953 {
7954 	struct sysctl_ctx_list *ctx;
7955 	struct sysctl_oid_list *children;
7956 
7957 	DBENTER(BCE_VERBOSE_MISC);
7958 
7959 	ctx = device_get_sysctl_ctx(sc->bce_dev);
7960 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
7961 
7962 #ifdef BCE_DEBUG
7963 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7964 		"l2fhdr_error_sim_control",
7965 		CTLFLAG_RW, &l2fhdr_error_sim_control,
7966 		0, "Debug control to force l2fhdr errors");
7967 
7968 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7969 		"l2fhdr_error_sim_count",
7970 		CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
7971 		0, "Number of simulated l2_fhdr errors");
7972 #endif
7973 
7974 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7975 		"l2fhdr_error_count",
7976 		CTLFLAG_RD, &sc->l2fhdr_error_count,
7977 		0, "Number of l2_fhdr errors");
7978 
7979 #ifdef BCE_DEBUG
7980 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7981 		"mbuf_alloc_failed_sim_control",
7982 		CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
7983 		0, "Debug control to force mbuf allocation failures");
7984 
7985 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7986 		"mbuf_alloc_failed_sim_count",
7987 		CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
7988 		0, "Number of simulated mbuf cluster allocation failures");
7989 #endif
7990 
7991 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7992 		"mbuf_alloc_failed_count",
7993 		CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
7994 		0, "Number of mbuf allocation failures");
7995 
7996 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7997 		"fragmented_mbuf_count",
7998 		CTLFLAG_RD, &sc->fragmented_mbuf_count,
7999 		0, "Number of fragmented mbufs");
8000 
8001 #ifdef BCE_DEBUG
8002 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8003 		"dma_map_addr_failed_sim_control",
8004 		CTLFLAG_RW, &dma_map_addr_failed_sim_control,
8005 		0, "Debug control to force DMA mapping failures");
8006 
8007 	/* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
8008 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8009 		"dma_map_addr_failed_sim_count",
8010 		CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
8011 		0, "Number of simulated DMA mapping failures");
8012 
8013 #endif
8014 
8015 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8016 		"dma_map_addr_rx_failed_count",
8017 		CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
8018 		0, "Number of RX DMA mapping failures");
8019 
8020 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8021 		"dma_map_addr_tx_failed_count",
8022 		CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
8023 		0, "Number of TX DMA mapping failures");
8024 
8025 #ifdef BCE_DEBUG
8026 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8027 		"unexpected_attention_sim_control",
8028 		CTLFLAG_RW, &unexpected_attention_sim_control,
8029 		0, "Debug control to simulate unexpected attentions");
8030 
8031 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8032 		"unexpected_attention_sim_count",
8033 		CTLFLAG_RW, &sc->unexpected_attention_sim_count,
8034 		0, "Number of simulated unexpected attentions");
8035 #endif
8036 
8037 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8038 		"unexpected_attention_count",
8039 		CTLFLAG_RW, &sc->unexpected_attention_count,
8040 		0, "Number of unexpected attentions");
8041 
8042 #ifdef BCE_DEBUG
8043 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8044 		"debug_bootcode_running_failure",
8045 		CTLFLAG_RW, &bootcode_running_failure_sim_control,
8046 		0, "Debug control to force bootcode running failures");
8047 
8048 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8049 		"rx_low_watermark",
8050 		CTLFLAG_RD, &sc->rx_low_watermark,
8051 		0, "Lowest level of free rx_bd's");
8052 
8053 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8054 		"rx_empty_count",
8055 		CTLFLAG_RD, &sc->rx_empty_count,
8056 		0, "Number of times the RX chain was empty");
8057 
8058 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8059 		"tx_hi_watermark",
8060 		CTLFLAG_RD, &sc->tx_hi_watermark,
8061 		0, "Highest level of used tx_bd's");
8062 
8063 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8064 		"tx_full_count",
8065 		CTLFLAG_RD, &sc->tx_full_count,
8066 		0, "Number of times the TX chain was full");
8067 
8068 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8069 		"requested_tso_frames",
8070 		CTLFLAG_RD, &sc->requested_tso_frames,
8071 		0, "Number of TSO frames received");
8072 
8073 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8074 		"rx_interrupts",
8075 		CTLFLAG_RD, &sc->rx_interrupts,
8076 		0, "Number of RX interrupts");
8077 
8078 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8079 		"tx_interrupts",
8080 		CTLFLAG_RD, &sc->tx_interrupts,
8081 		0, "Number of TX interrupts");
8082 
8083 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8084 		"rx_intr_time",
8085 		CTLFLAG_RD, &sc->rx_intr_time,
8086 		"RX interrupt time");
8087 
8088 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8089 		"tx_intr_time",
8090 		CTLFLAG_RD, &sc->tx_intr_time,
8091 		"TX interrupt time");
8092 #endif
8093 
8094 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8095 		"stat_IfHcInOctets",
8096 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
8097 		"Bytes received");
8098 
8099 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8100 		"stat_IfHCInBadOctets",
8101 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
8102 		"Bad bytes received");
8103 
8104 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8105 		"stat_IfHCOutOctets",
8106 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
8107 		"Bytes sent");
8108 
8109 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8110 		"stat_IfHCOutBadOctets",
8111 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
8112 		"Bad bytes sent");
8113 
8114 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8115 		"stat_IfHCInUcastPkts",
8116 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
8117 		"Unicast packets received");
8118 
8119 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8120 		"stat_IfHCInMulticastPkts",
8121 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
8122 		"Multicast packets received");
8123 
8124 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8125 		"stat_IfHCInBroadcastPkts",
8126 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
8127 		"Broadcast packets received");
8128 
8129 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8130 		"stat_IfHCOutUcastPkts",
8131 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
8132 		"Unicast packets sent");
8133 
8134 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8135 		"stat_IfHCOutMulticastPkts",
8136 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
8137 		"Multicast packets sent");
8138 
8139 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8140 		"stat_IfHCOutBroadcastPkts",
8141 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
8142 		"Broadcast packets sent");
8143 
8144 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8145 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
8146 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
8147 		0, "Internal MAC transmit errors");
8148 
8149 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8150 		"stat_Dot3StatsCarrierSenseErrors",
8151 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
8152 		0, "Carrier sense errors");
8153 
8154 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8155 		"stat_Dot3StatsFCSErrors",
8156 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
8157 		0, "Frame check sequence errors");
8158 
8159 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8160 		"stat_Dot3StatsAlignmentErrors",
8161 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
8162 		0, "Alignment errors");
8163 
8164 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8165 		"stat_Dot3StatsSingleCollisionFrames",
8166 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
8167 		0, "Single Collision Frames");
8168 
8169 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8170 		"stat_Dot3StatsMultipleCollisionFrames",
8171 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
8172 		0, "Multiple Collision Frames");
8173 
8174 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8175 		"stat_Dot3StatsDeferredTransmissions",
8176 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
8177 		0, "Deferred Transmissions");
8178 
8179 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8180 		"stat_Dot3StatsExcessiveCollisions",
8181 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
8182 		0, "Excessive Collisions");
8183 
8184 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8185 		"stat_Dot3StatsLateCollisions",
8186 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
8187 		0, "Late Collisions");
8188 
8189 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8190 		"stat_EtherStatsCollisions",
8191 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
8192 		0, "Collisions");
8193 
8194 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8195 		"stat_EtherStatsFragments",
8196 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
8197 		0, "Fragments");
8198 
8199 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8200 		"stat_EtherStatsJabbers",
8201 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
8202 		0, "Jabbers");
8203 
8204 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8205 		"stat_EtherStatsUndersizePkts",
8206 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
8207 		0, "Undersize packets");
8208 
8209 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8210 		"stat_EtherStatsOversizePkts",
8211 		CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts,
8212 		0, "stat_EtherStatsOversizePkts");
8213 
8214 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8215 		"stat_EtherStatsPktsRx64Octets",
8216 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
8217 		0, "Bytes received in 64 byte packets");
8218 
8219 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8220 		"stat_EtherStatsPktsRx65Octetsto127Octets",
8221 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
8222 		0, "Bytes received in 65 to 127 byte packets");
8223 
8224 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8225 		"stat_EtherStatsPktsRx128Octetsto255Octets",
8226 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
8227 		0, "Bytes received in 128 to 255 byte packets");
8228 
8229 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8230 		"stat_EtherStatsPktsRx256Octetsto511Octets",
8231 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
8232 		0, "Bytes received in 256 to 511 byte packets");
8233 
8234 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8235 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
8236 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
8237 		0, "Bytes received in 512 to 1023 byte packets");
8238 
8239 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8240 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
8241 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
8242 		0, "Bytes received in 1024 t0 1522 byte packets");
8243 
8244 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8245 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
8246 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
8247 		0, "Bytes received in 1523 to 9022 byte packets");
8248 
8249 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8250 		"stat_EtherStatsPktsTx64Octets",
8251 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
8252 		0, "Bytes sent in 64 byte packets");
8253 
8254 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8255 		"stat_EtherStatsPktsTx65Octetsto127Octets",
8256 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
8257 		0, "Bytes sent in 65 to 127 byte packets");
8258 
8259 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8260 		"stat_EtherStatsPktsTx128Octetsto255Octets",
8261 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
8262 		0, "Bytes sent in 128 to 255 byte packets");
8263 
8264 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8265 		"stat_EtherStatsPktsTx256Octetsto511Octets",
8266 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
8267 		0, "Bytes sent in 256 to 511 byte packets");
8268 
8269 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8270 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
8271 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
8272 		0, "Bytes sent in 512 to 1023 byte packets");
8273 
8274 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8275 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
8276 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
8277 		0, "Bytes sent in 1024 to 1522 byte packets");
8278 
8279 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8280 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
8281 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
8282 		0, "Bytes sent in 1523 to 9022 byte packets");
8283 
8284 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8285 		"stat_XonPauseFramesReceived",
8286 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
8287 		0, "XON pause frames receved");
8288 
8289 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8290 		"stat_XoffPauseFramesReceived",
8291 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
8292 		0, "XOFF pause frames received");
8293 
8294 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8295 		"stat_OutXonSent",
8296 		CTLFLAG_RD, &sc->stat_OutXonSent,
8297 		0, "XON pause frames sent");
8298 
8299 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8300 		"stat_OutXoffSent",
8301 		CTLFLAG_RD, &sc->stat_OutXoffSent,
8302 		0, "XOFF pause frames sent");
8303 
8304 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8305 		"stat_FlowControlDone",
8306 		CTLFLAG_RD, &sc->stat_FlowControlDone,
8307 		0, "Flow control done");
8308 
8309 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8310 		"stat_MacControlFramesReceived",
8311 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
8312 		0, "MAC control frames received");
8313 
8314 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8315 		"stat_XoffStateEntered",
8316 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
8317 		0, "XOFF state entered");
8318 
8319 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8320 		"stat_IfInFramesL2FilterDiscards",
8321 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
8322 		0, "Received L2 packets discarded");
8323 
8324 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8325 		"stat_IfInRuleCheckerDiscards",
8326 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
8327 		0, "Received packets discarded by rule");
8328 
8329 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8330 		"stat_IfInFTQDiscards",
8331 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
8332 		0, "Received packet FTQ discards");
8333 
8334 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8335 		"stat_IfInMBUFDiscards",
8336 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
8337 		0, "Received packets discarded due to lack of controller buffer memory");
8338 
8339 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8340 		"stat_IfInRuleCheckerP4Hit",
8341 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
8342 		0, "Received packets rule checker hits");
8343 
8344 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8345 		"stat_CatchupInRuleCheckerDiscards",
8346 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
8347 		0, "Received packets discarded in Catchup path");
8348 
8349 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8350 		"stat_CatchupInFTQDiscards",
8351 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
8352 		0, "Received packets discarded in FTQ in Catchup path");
8353 
8354 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8355 		"stat_CatchupInMBUFDiscards",
8356 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
8357 		0, "Received packets discarded in controller buffer memory in Catchup path");
8358 
8359 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8360 		"stat_CatchupInRuleCheckerP4Hit",
8361 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
8362 		0, "Received packets rule checker hits in Catchup path");
8363 
8364 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8365 		"com_no_buffers",
8366 		CTLFLAG_RD, &sc->com_no_buffers,
8367 		0, "Valid packets received but no RX buffers available");
8368 
8369 #ifdef BCE_DEBUG
8370 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8371 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
8372 		(void *)sc, 0,
8373 		bce_sysctl_driver_state, "I", "Drive state information");
8374 
8375 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8376 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
8377 		(void *)sc, 0,
8378 		bce_sysctl_hw_state, "I", "Hardware state information");
8379 
8380 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8381 		"bc_state", CTLTYPE_INT | CTLFLAG_RW,
8382 		(void *)sc, 0,
8383 		bce_sysctl_bc_state, "I", "Bootcode state information");
8384 
8385 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8386 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
8387 		(void *)sc, 0,
8388 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
8389 
8390 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8391 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
8392 		(void *)sc, 0,
8393 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
8394 
8395 #ifdef BCE_JUMBO_HDRSPLIT
8396 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8397 		"dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
8398 		(void *)sc, 0,
8399 		bce_sysctl_dump_pg_chain, "I", "Dump page chain");
8400 #endif
8401 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8402 		"dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
8403 		(void *)sc, 0,
8404 		bce_sysctl_dump_ctx, "I", "Dump context memory");
8405 
8406 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8407 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
8408 		(void *)sc, 0,
8409 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
8410 
8411 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8412 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
8413 		(void *)sc, 0,
8414 		bce_sysctl_reg_read, "I", "Register read");
8415 
8416 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8417 		"nvram_read", CTLTYPE_INT | CTLFLAG_RW,
8418 		(void *)sc, 0,
8419 		bce_sysctl_nvram_read, "I", "NVRAM read");
8420 
8421 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8422 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
8423 		(void *)sc, 0,
8424 		bce_sysctl_phy_read, "I", "PHY register read");
8425 
8426 #endif
8427 
8428 	DBEXIT(BCE_VERBOSE_MISC);
8429 }
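
/*
 * Usage sketch (assumptions: interface bce0; the debug-only nodes require
 * BCE_DEBUG).  The nodes registered above appear under the device's sysctl
 * tree, e.g.:
 *
 *   sysctl dev.bce.0.l2fhdr_error_count      # read a counter
 *   sysctl dev.bce.0.dump_rx_chain=1         # dump the rx_bd chain (BCE_DEBUG)
 *   sysctl dev.bce.0.breakpoint=1            # dump state, enter debugger (BCE_DEBUG)
 */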
8430 
8431 
8432 /****************************************************************************/
8433 /* BCE Debug Routines                                                       */
8434 /****************************************************************************/
8435 #ifdef BCE_DEBUG
8436 
8437 /****************************************************************************/
8438 /* Freezes the controller to allow for a cohesive state dump.               */
8439 /*                                                                          */
8440 /* Returns:                                                                 */
8441 /*   Nothing.                                                               */
8442 /****************************************************************************/
8443 static void
8444 bce_freeze_controller(struct bce_softc *sc)
8445 {
8446 	u32 val;
8447 	val = REG_RD(sc, BCE_MISC_COMMAND);
8448 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
8449 	REG_WR(sc, BCE_MISC_COMMAND, val);
8450 }
8451 
8452 
8453 /****************************************************************************/
8454 /* Unfreezes the controller after a freeze operation.  This may not always  */
8455 /* work and the controller will require a reset!                            */
8456 /*                                                                          */
8457 /* Returns:                                                                 */
8458 /*   Nothing.                                                               */
8459 /****************************************************************************/
8460 static void
8461 bce_unfreeze_controller(struct bce_softc *sc)
8462 {
8463 	u32 val;
8464 	val = REG_RD(sc, BCE_MISC_COMMAND);
8465 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
8466 	REG_WR(sc, BCE_MISC_COMMAND, val);
8467 }
8468 
8469 
8470 /****************************************************************************/
8471 /* Prints out Ethernet frame information from an mbuf.                      */
8472 /*                                                                          */
8473 /* Partially decode an Ethernet frame to look at some important headers.    */
8474 /*                                                                          */
8475 /* Returns:                                                                 */
8476 /*   Nothing.                                                               */
8477 /****************************************************************************/
8478 static void
8479 bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
8480 {
8481 	struct ether_vlan_header *eh;
8482 	u16 etype;
8483 	int ehlen;
8484 	struct ip *ip;
8485 	struct tcphdr *th;
8486 	struct udphdr *uh;
8487 	struct arphdr *ah;
8488 
8489 	BCE_PRINTF(
8490 		"-----------------------------"
8491 		" Frame Decode "
8492 		"-----------------------------\n");
8493 
8494 	eh = mtod(m, struct ether_vlan_header *);
8495 
8496 	/* Handle VLAN encapsulation if present. */
8497 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
8498 		etype = ntohs(eh->evl_proto);
8499 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8500 	} else {
8501 		etype = ntohs(eh->evl_encap_proto);
8502 		ehlen = ETHER_HDR_LEN;
8503 	}
8504 
8505 	/* ToDo: Add VLAN output. */
8506 	BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
8507 		eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);
8508 
8509 	switch (etype) {
8510 		case ETHERTYPE_IP:
8511 			ip = (struct ip *)(m->m_data + ehlen);
8512 			BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, len = %d bytes, "
8513 				"protocol = 0x%02X, xsum = 0x%04X\n",
8514 				ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
8515 				ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));
8516 
8517 			switch (ip->ip_p) {
8518 				case IPPROTO_TCP:
8519 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8520 					BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = %d bytes, "
8521 						"flags = 0x%b, csum = 0x%04X\n",
8522 						ntohs(th->th_dport), ntohs(th->th_sport), (th->th_off << 2),
8523 						th->th_flags, "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST\02SYN\01FIN",
8524 						ntohs(th->th_sum));
8525 					break;
8526 				case IPPROTO_UDP:
8527 					uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8528 					BCE_PRINTF("-udp: dest = %d, src = %d, len = %d bytes, "
8529 						"csum = 0x%04X\n", ntohs(uh->uh_dport), ntohs(uh->uh_sport),
8530 						ntohs(uh->uh_ulen), ntohs(uh->uh_sum));
8531 					break;
8532 				case IPPROTO_ICMP:
8533 					BCE_PRINTF("icmp:\n");
8534 					break;
8535 				default:
8536 					BCE_PRINTF("----: Other IP protocol.\n");
8537 			}
8538 			break;
8539 		case ETHERTYPE_IPV6:
8540 			BCE_PRINTF("ipv6: No decode supported.\n");
8541 			break;
8542 		case ETHERTYPE_ARP:
8543 			BCE_PRINTF("-arp: ");
8544 			ah = (struct arphdr *) (m->m_data + ehlen);
8545 			switch (ntohs(ah->ar_op)) {
8546 				case ARPOP_REVREQUEST:
8547 					printf("reverse ARP request\n");
8548 					break;
8549 				case ARPOP_REVREPLY:
8550 					printf("reverse ARP reply\n");
8551 					break;
8552 				case ARPOP_REQUEST:
8553 					printf("ARP request\n");
8554 					break;
8555 				case ARPOP_REPLY:
8556 					printf("ARP reply\n");
8557 					break;
8558 				default:
8559 					printf("other ARP operation\n");
8560 			}
8561 			break;
8562 		default:
8563 			BCE_PRINTF("----: Other protocol.\n");
8564 	}
8565 
8566 	BCE_PRINTF(
8567 		"-----------------------------"
8568 		"--------------"
8569 		"-----------------------------\n");
8570 }
8571 
8572 
8573 /****************************************************************************/
8574 /* Prints out information about an mbuf.                                    */
8575 /*                                                                          */
8576 /* Returns:                                                                 */
8577 /*   Nothing.                                                               */
8578 /****************************************************************************/
8579 static __attribute__ ((noinline)) void
8580 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
8581 {
8582 	struct mbuf *mp = m;
8583 
8584 	if (m == NULL) {
8585 		BCE_PRINTF("mbuf: null pointer\n");
8586 		return;
8587 	}
8588 
8589 	while (mp) {
8590 		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
8591 			mp, mp->m_len, mp->m_flags,
8592 			"\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
8593 			mp->m_data);
8594 
8595 		if (mp->m_flags & M_PKTHDR) {
8596 			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
8597 				mp->m_pkthdr.len, mp->m_flags,
8598 				"\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
8599 				"\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
8600 				mp->m_pkthdr.csum_flags,
8601 				"\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
8602 				"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
8603 				"\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
8604 		}
8605 
8606 		if (mp->m_flags & M_EXT) {
8607 			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
8608 				mp->m_ext.ext_buf, mp->m_ext.ext_size);
8609 			switch (mp->m_ext.ext_type) {
8610 				case EXT_CLUSTER:    printf("EXT_CLUSTER\n"); break;
8611 				case EXT_SFBUF:      printf("EXT_SFBUF\n"); break;
8612 				case EXT_JUMBO9:     printf("EXT_JUMBO9\n"); break;
8613 				case EXT_JUMBO16:    printf("EXT_JUMBO16\n"); break;
8614 				case EXT_PACKET:     printf("EXT_PACKET\n"); break;
8615 				case EXT_MBUF:       printf("EXT_MBUF\n"); break;
8616 				case EXT_NET_DRV:    printf("EXT_NET_DRV\n"); break;
8617 				case EXT_MOD_TYPE:   printf("EXT_MDD_TYPE\n"); break;
8618 				case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
8619 				case EXT_EXTREF:     printf("EXT_EXTREF\n"); break;
8620 				default:             printf("UNKNOWN\n");
8621 			}
8622 		}
8623 
8624 		mp = mp->m_next;
8625 	}
8626 }
8627 
8628 
8629 /****************************************************************************/
8630 /* Prints out the mbufs in the TX mbuf chain.                               */
8631 /*                                                                          */
8632 /* Returns:                                                                 */
8633 /*   Nothing.                                                               */
8634 /****************************************************************************/
8635 static __attribute__ ((noinline)) void
8636 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8637 {
8638 	struct mbuf *m;
8639 
8640 	BCE_PRINTF(
8641 		"----------------------------"
8642 		"  tx mbuf data  "
8643 		"----------------------------\n");
8644 
8645 	for (int i = 0; i < count; i++) {
8646 		m = sc->tx_mbuf_ptr[chain_prod];
8647 		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
8648 		bce_dump_mbuf(sc, m);
8649 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
8650 	}
8651 
8652 	BCE_PRINTF(
8653 		"----------------------------"
8654 		"----------------"
8655 		"----------------------------\n");
8656 }
8657 
8658 
8659 /****************************************************************************/
8660 /* Prints out the mbufs in the RX mbuf chain.                               */
8661 /*                                                                          */
8662 /* Returns:                                                                 */
8663 /*   Nothing.                                                               */
8664 /****************************************************************************/
8665 static __attribute__ ((noinline)) void
8666 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8667 {
8668 	struct mbuf *m;
8669 
8670 	BCE_PRINTF(
8671 		"----------------------------"
8672 		"  rx mbuf data  "
8673 		"----------------------------\n");
8674 
8675 	for (int i = 0; i < count; i++) {
8676 		m = sc->rx_mbuf_ptr[chain_prod];
8677 		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
8678 		bce_dump_mbuf(sc, m);
8679 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
8680 	}
8681 
8682 
8683 	BCE_PRINTF(
8684 		"----------------------------"
8685 		"----------------"
8686 		"----------------------------\n");
8687 }
8688 
8689 
8690 #ifdef BCE_JUMBO_HDRSPLIT
8691 /****************************************************************************/
8692 /* Prints out the mbufs in the mbuf page chain.                             */
8693 /*                                                                          */
8694 /* Returns:                                                                 */
8695 /*   Nothing.                                                               */
8696 /****************************************************************************/
8697 static __attribute__ ((noinline)) void
8698 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8699 {
8700 	struct mbuf *m;
8701 
8702 	BCE_PRINTF(
8703 		"----------------------------"
8704 		"  pg mbuf data  "
8705 		"----------------------------\n");
8706 
8707 	for (int i = 0; i < count; i++) {
8708 		m = sc->pg_mbuf_ptr[chain_prod];
8709 		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
8710 		bce_dump_mbuf(sc, m);
8711 		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
8712 	}
8713 
8714 
8715 	BCE_PRINTF(
8716 		"----------------------------"
8717 		"----------------"
8718 		"----------------------------\n");
8719 }
8720 #endif
8721 
8722 
8723 /****************************************************************************/
8724 /* Prints out a tx_bd structure.                                            */
8725 /*                                                                          */
8726 /* Returns:                                                                 */
8727 /*   Nothing.                                                               */
8728 /****************************************************************************/
8729 static __attribute__ ((noinline)) void
8730 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
8731 {
8732 	if (idx > MAX_TX_BD)
8733 		/* Index out of range. */
8734 		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
8735 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
8736 		/* TX Chain page pointer. */
8737 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8738 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
8739 	else {
8740 			/* Normal tx_bd entry. */
8741 			BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8742 				"vlan tag= 0x%04X, flags = 0x%04X (", idx,
8743 				txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
8744 				txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
8745 				txbd->tx_bd_flags);
8746 
8747 			if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
8748 				printf(" CONN_FAULT");
8749 
8750 			if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
8751 				printf(" TCP_UDP_CKSUM");
8752 
8753 			if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
8754 				printf(" IP_CKSUM");
8755 
8756 			if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
8757 				printf("  VLAN");
8758 
8759 			if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
8760 				printf(" COAL_NOW");
8761 
8762 			if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
8763 				printf(" DONT_GEN_CRC");
8764 
8765 			if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
8766 				printf(" START");
8767 
8768 			if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
8769 				printf(" END");
8770 
8771 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
8772 				printf(" LSO");
8773 
8774 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
8775 				printf(" OPTION_WORD");
8776 
8777 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
8778 				printf(" FLAGS");
8779 
8780 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
8781 				printf(" SNAP");
8782 
8783 			printf(" )\n");
8784 		}
8785 
8786 }
8787 
8788 
8789 /****************************************************************************/
8790 /* Prints out a rx_bd structure.                                            */
8791 /*                                                                          */
8792 /* Returns:                                                                 */
8793 /*   Nothing.                                                               */
8794 /****************************************************************************/
8795 static __attribute__ ((noinline)) void
8796 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
8797 {
8798 	if (idx > MAX_RX_BD)
8799 		/* Index out of range. */
8800 		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
8801 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
8802 		/* RX Chain page pointer. */
8803 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8804 			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
8805 	else
8806 		/* Normal rx_bd entry. */
8807 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8808 			"flags = 0x%08X\n", idx,
8809 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
8810 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
8811 }
8812 
8813 
8814 #ifdef BCE_JUMBO_HDRSPLIT
8815 /****************************************************************************/
8816 /* Prints out a rx_bd structure in the page chain.                          */
8817 /*                                                                          */
8818 /* Returns:                                                                 */
8819 /*   Nothing.                                                               */
8820 /****************************************************************************/
8821 static __attribute__ ((noinline)) void
8822 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
8823 {
8824 	if (idx > MAX_PG_BD)
8825 		/* Index out of range. */
8826 		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
8827 	else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
8828 		/* Page Chain page pointer. */
8829 		BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8830 			idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
8831 	else
8832 		/* Normal rx_bd entry. */
8833 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8834 			"flags = 0x%08X\n", idx,
8835 			pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
8836 			pgbd->rx_bd_len, pgbd->rx_bd_flags);
8837 }
8838 #endif
8839 
8840 
8841 /****************************************************************************/
8842 /* Prints out a l2_fhdr structure.                                          */
8843 /*                                                                          */
8844 /* Returns:                                                                 */
8845 /*   Nothing.                                                               */
8846 /****************************************************************************/
8847 static __attribute__ ((noinline)) void
8848 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
8849 {
8850 	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
8851 		"pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
8852 		"tcp_udp_xsum = 0x%04X\n", idx,
8853 		l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
8854 		l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
8855 		l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
8856 }
8857 
8858 
8859 /****************************************************************************/
8860 /* Prints out context memory info.  (Only useful for CID 0 to 16.)          */
8861 /*                                                                          */
8862 /* Returns:                                                                 */
8863 /*   Nothing.                                                               */
8864 /****************************************************************************/
8865 static __attribute__ ((noinline)) void
8866 bce_dump_ctx(struct bce_softc *sc, u16 cid)
8867 {
8868 	if (cid <= TX_CID) {
8869 		BCE_PRINTF(
8870 			"----------------------------"
8871 			"    CTX Data    "
8872 			"----------------------------\n");
8873 
8874 		BCE_PRINTF("     0x%04X - (CID) Context ID\n", cid);
8875 
8876 		if (cid == RX_CID) {
8877 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
8878 				"producer index\n",
8879 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
8880 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host byte sequence\n",
8881 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BSEQ));
8882 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
8883 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
8884 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
8885 				"descriptor address\n",
8886  				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
8887 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
8888 				"descriptor address\n",
8889 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
8890 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer index\n",
8891 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDIDX));
8892 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
8893 				"producer index\n",
8894 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_PG_BDIDX));
8895 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
8896 				"buffer size\n",
8897 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_PG_BUF_SIZE));
8898 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
8899 				"chain address\n",
8900 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
8901 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
8902 				"chain address\n",
8903 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
8904 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
8905 				"consumer index\n",
8906 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDIDX));
8907 		} else if (cid == TX_CID) {
8908 			if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
8909 				(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
8910 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
8911 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE_XI));
8912 				BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx cmd\n",
8913 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE_XI));
8914 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) h/w buffer "
8915 					"descriptor address\n",	CTX_RD(sc,
8916 					GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
8917 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) h/w buffer "
8918 					"descriptor address\n", CTX_RD(sc,
8919 					GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
8920 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) host producer "
8921 					"index\n", CTX_RD(sc, GET_CID_ADDR(cid),
8922 					BCE_L2CTX_TX_HOST_BIDX_XI));
8923 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) host byte "
8924 					"sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
8925 					BCE_L2CTX_TX_HOST_BSEQ_XI));
8926 			} else {
8927 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
8928 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
8929 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
8930 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE));
8931 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) h/w buffer "
8932 					"descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
8933 					BCE_L2CTX_TX_TBDR_BHADDR_HI));
8934 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) h/w buffer "
8935 					"descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
8936 					BCE_L2CTX_TX_TBDR_BHADDR_LO));
8937 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host producer "
8938 					"index\n", CTX_RD(sc, GET_CID_ADDR(cid),
8939 					BCE_L2CTX_TX_HOST_BIDX));
8940 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
8941 					"sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
8942 					BCE_L2CTX_TX_HOST_BSEQ));
8943 			}
8944 		} else
8945 			BCE_PRINTF(" Unknown CID\n");
8946 
8947 		BCE_PRINTF(
8948 			"----------------------------"
8949 			"    Raw CTX     "
8950 			"----------------------------\n");
8951 
8952 		for (int i = 0x0; i < 0x300; i += 0x10) {
8953 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
8954 				CTX_RD(sc, GET_CID_ADDR(cid), i),
8955 				CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
8956 				CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
8957 				CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
8958 		}
8959 
8960 
8961 		BCE_PRINTF(
8962 			"----------------------------"
8963 			"----------------"
8964 			"----------------------------\n");
8965 	}
8966 }
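
/*
 * Note: the raw context dump above walks the first 0x300 bytes of the CID's
 * context memory in 16-byte rows; CIDs above TX_CID produce no output at
 * all, since only the RX and TX contexts are decoded.
 */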
8967 
8968 
8969 /****************************************************************************/
8970 /* Prints out the FTQ data.                                                 */
8971 /*                                                                          */
8972 /* Returns:                                                                 */
8973 /*   Nothing.                                                               */
8974 /****************************************************************************/
8975 static __attribute__ ((noinline)) void
8976 bce_dump_ftqs(struct bce_softc *sc)
8977 {
8978 	u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;
8979 
8980 	BCE_PRINTF(
8981 		"----------------------------"
8982 		"    FTQ Data    "
8983 		"----------------------------\n");
8984 
8985 	BCE_PRINTF("   FTQ    Command    Control   Depth_Now  Max_Depth  Valid_Cnt \n");
8986 	BCE_PRINTF(" ------- ---------- ---------- ---------- ---------- ----------\n");
8987 
8988 	/* Setup the generic statistic counters for the FTQ valid count. */
8989 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
8990 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT  << 16) |
8991 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT   <<  8) |
8992 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
8993 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
8994 
8995 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT  << 24) |
8996 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT  << 16) |
8997 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT <<  8) |
8998 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
8999 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);
9000 
9001 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT  << 24) |
9002 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT  << 16) |
9003 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT   <<  8) |
9004 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
9005 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);
9006 
9007 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT   << 24) |
9008 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT  << 16) |
9009 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT  <<  8) |
9010 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
9011 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);
9012 
9013 	/* Input queue to the Receive Lookup state machine */
9014 	cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
9015 	ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
9016 	cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
9017 	max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
9018 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9019 	BCE_PRINTF(" RLUP    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9020 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9021 
9022 	/* Input queue to the Receive Processor */
9023 	cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
9024 	ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
9025 	cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
9026 	max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
9027 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9028 	BCE_PRINTF(" RXP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9029 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9030 
9031 	/* Input queue to the Receive Processor */
9032 	cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
9033 	ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
9034 	cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
9035 	max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
9036 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9037 	BCE_PRINTF(" RXPC    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9038 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9039 
9040 	/* Input queue to the Receive Virtual to Physical state machine */
9041 	cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
9042 	ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
9043 	cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
9044 	max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
9045 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9046 	BCE_PRINTF(" RV2PP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9047 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9048 
9049 	/* Input queue to the Receive Virtual to Physical state machine */
9050 	cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
9051 	ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
9052 	cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
9053 	max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
9054 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
9055 	BCE_PRINTF(" RV2PM   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9056 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9057 
9058 	/* Input queue to the Receive Virtual to Physical state machine */
9059 	cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
9060 	ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
9061 	cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
9062 	max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
9063 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
9064 	BCE_PRINTF(" RV2PT   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9065 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9066 
9067 	/* Input queue to the Receive DMA state machine */
9068 	cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
9069 	ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
9070 	cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9071 	max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9072 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
9073 	BCE_PRINTF(" RDMA    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9074 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9075 
9076 	/* Input queue to the Transmit Scheduler state machine */
9077 	cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
9078 	ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
9079 	cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
9080 	max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
9081 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
9082 	BCE_PRINTF(" TSCH    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9083 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9084 
9085 	/* Input queue to the Transmit Buffer Descriptor state machine */
9086 	cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
9087 	ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
9088 	cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
9089 	max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
9090 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
9091 	BCE_PRINTF(" TBDR    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9092 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9093 
9094 	/* Input queue to the Transmit Processor */
9095 	cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
9096 	ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
9097 	cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
9098 	max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
9099 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
9100 	BCE_PRINTF(" TXP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9101 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9102 
9103 	/* Input queue to the Transmit DMA state machine */
9104 	cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
9105 	ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
9106 	cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9107 	max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9108 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
9109 	BCE_PRINTF(" TDMA    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9110 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9111 
9112 	/* Input queue to the Transmit Patch-Up Processor */
9113 	cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
9114 	ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
9115 	cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
9116 	max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
9117 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
9118 	BCE_PRINTF(" TPAT    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9119 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9120 
9121 	/* Input queue to the Transmit Assembler state machine */
9122 	cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
9123 	ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
9124 	cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
9125 	max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
9126 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
9127 	BCE_PRINTF(" TAS     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9128 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9129 
9130 	/* Input queue to the Completion Processor */
9131 	cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
9132 	ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
9133 	cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
9134 	max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
9135 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
9136 	BCE_PRINTF(" COMX    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9137 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9138 
9139 	/* Input queue to the Completion Processor */
9140 	cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
9141 	ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
9142 	cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
9143 	max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
9144 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
9145 	BCE_PRINTF(" COMT    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9146 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9147 
9148 	/* Input queue to the Completion Processor */
9149 	cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
9150 	ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
9151 	cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
9152 	max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
9153 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
9154 	BCE_PRINTF(" COMQ    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9155 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9156 
9157 	/* Setup the generic statistic counters for the FTQ valid count. */
9158 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT  << 16) |
9159 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT  <<  8) |
9160 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
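	/*
	 * Each byte lane of HC_STAT_GEN_SEL_0 selects the FTQ whose valid
	 * count is reported through the corresponding HC_STAT_GEN_STATx
	 * register: byte 0 (MGMQ) feeds the GEN_STAT0 read for the MCP queue
	 * below, byte 1 (CPQ) feeds GEN_STAT1, byte 2 (CSQ) feeds GEN_STAT2,
	 * and on the 5709/5716 byte 3 (RV2PCSQ) feeds GEN_STAT3.
	 */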
9161 
9162 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
9163 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
9164 		val = val | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 24);
9165 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9166 
9167 	/* Input queue to the Management Control Processor */
9168 	cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
9169 	ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
9170 	cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9171 	max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9172 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9173 	BCE_PRINTF(" MCP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9174 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9175 
9176 	/* Input queue to the Command Processor */
9177 	cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
9178 	ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
9179 	cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9180 	max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9181 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9182 	BCE_PRINTF(" CP      0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9183 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9184 
9185 	/* Input queue to the Completion Scheduler state machine */
9186 	cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
9187 	ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
9188 	cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
9189 	max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
9190 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9191 	BCE_PRINTF(" CS      0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9192 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9193 
9194 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9195 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9196 		/* Input queue to the Receive Virtual to Physical Command Scheduler */
9197 		cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
9198 		ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
9199 		cur_depth = (ctl & 0xFFC00000) >> 22;
9200 		max_depth = (ctl & 0x003FF000) >> 12;
9201 		valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9202 		BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9203 			cmd, ctl, cur_depth, max_depth, valid_cnt);
9204 	}
9205 
9206 	BCE_PRINTF(
9207 		"----------------------------"
9208 		"----------------"
9209 		"----------------------------\n");
9210 }
9211 
9212 
9213 /****************************************************************************/
9214 /* Prints out the TX chain.                                                 */
9215 /*                                                                          */
9216 /* Returns:                                                                 */
9217 /*   Nothing.                                                               */
9218 /****************************************************************************/
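/*
 * Typical usage (see bce_breakpoint() below):
 *
 *	bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
 *
 * Starting at sc->tx_cons with a smaller count is a reasonable way to
 * limit the dump to the descriptors currently of interest.
 */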
9219 static __attribute__ ((noinline)) void
9220 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
9221 {
9222 	struct tx_bd *txbd;
9223 
9224 	/* First some info about the tx_bd chain structure. */
9225 	BCE_PRINTF(
9226 		"----------------------------"
9227 		"  tx_bd  chain  "
9228 		"----------------------------\n");
9229 
9230 	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
9231 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
9232 
9233 	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
9234 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
9235 
9236 	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
9237 
9238 	BCE_PRINTF(
9239 		"----------------------------"
9240 		"   tx_bd data   "
9241 		"----------------------------\n");
9242 
9243 	/* Now print out the tx_bd's themselves. */
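	/*
	 * TX_PAGE() and TX_IDX() split the 16-bit chain index into a page
	 * number and an offset within that page, while NEXT_TX_BD() skips
	 * the last tx_bd on each page, which is reserved for the next-page
	 * pointer (hence "usable" < "total" above).
	 */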
9244 	for (int i = 0; i < count; i++) {
9245 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
9246 		bce_dump_txbd(sc, tx_prod, txbd);
9247 		tx_prod = NEXT_TX_BD(tx_prod);
9248 	}
9249 
9250 	BCE_PRINTF(
9251 		"----------------------------"
9252 		"----------------"
9253 		"----------------------------\n");
9254 }
9255 
9256 
9257 /****************************************************************************/
9258 /* Prints out the RX chain.                                                 */
9259 /*                                                                          */
9260 /* Returns:                                                                 */
9261 /*   Nothing.                                                               */
9262 /****************************************************************************/
9263 static __attribute__ ((noinline)) void
9264 bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
9265 {
9266 	struct rx_bd *rxbd;
9267 
9268 	/* First some info about the rx_bd chain structure. */
9269 	BCE_PRINTF(
9270 		"----------------------------"
9271 		"  rx_bd  chain  "
9272 		"----------------------------\n");
9273 
9274 	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
9275 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
9276 
9277 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9278 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
9279 
9280 	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
9281 
9282 	BCE_PRINTF(
9283 		"----------------------------"
9284 		"   rx_bd data   "
9285 		"----------------------------\n");
9286 
9287 	/* Now print out the rx_bd's themselves. */
9288 	for (int i = 0; i < count; i++) {
9289 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
9290 		bce_dump_rxbd(sc, rx_prod, rxbd);
9291 		rx_prod = RX_CHAIN_IDX(rx_prod + 1);
9292 	}
9293 
9294 	BCE_PRINTF(
9295 		"----------------------------"
9296 		"----------------"
9297 		"----------------------------\n");
9298 }
9299 
9300 
9301 #ifdef BCE_JUMBO_HDRSPLIT
9302 /****************************************************************************/
9303 /* Prints out the page chain.                                               */
9304 /*                                                                          */
9305 /* Returns:                                                                 */
9306 /*   Nothing.                                                               */
9307 /****************************************************************************/
9308 static __attribute__ ((noinline)) void
9309 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
9310 {
9311 	struct rx_bd *pgbd;
9312 
9313 	/* First some info about the page chain structure. */
9314 	BCE_PRINTF(
9315 		"----------------------------"
9316 		"   page chain   "
9317 		"----------------------------\n");
9318 
9319 	BCE_PRINTF("page size      = 0x%08X, pg chain pages        = 0x%08X\n",
9320 		(u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
9321 
9322 	BCE_PRINTF("pg_bd per page = 0x%08X, usable pg_bd per page = 0x%08X\n",
9323 		(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
9324 
9325 	BCE_PRINTF("total pg_bd    = 0x%08X, max_pg_bd             = 0x%08X\n",
9326 		(u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
9327 
9328 	BCE_PRINTF(
9329 		"----------------------------"
9330 		"   page data    "
9331 		"----------------------------\n");
9332 
9333 	/* Now print out the rx_bd's themselves. */
9334 	for (int i = 0; i < count; i++) {
9335 		pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
9336 		bce_dump_pgbd(sc, pg_prod, pgbd);
9337 		pg_prod = PG_CHAIN_IDX(pg_prod + 1);
9338 	}
9339 
9340 	BCE_PRINTF(
9341 		"----------------------------"
9342 		"----------------"
9343 		"----------------------------\n");
9344 }
9345 #endif
9346 
9347 
9348 /****************************************************************************/
9349 /* Prints out the status block from host memory.                            */
9350 /*                                                                          */
9351 /* Returns:                                                                 */
9352 /*   Nothing.                                                               */
9353 /****************************************************************************/
9354 static __attribute__ ((noinline)) void
9355 bce_dump_status_block(struct bce_softc *sc)
9356 {
9357 	struct status_block *sblk;
9358 
9359 	sblk = sc->status_block;
9360 
9361 	BCE_PRINTF(
9362 		"----------------------------"
9363 		"  Status Block  "
9364 		"----------------------------\n");
9365 
9366 	BCE_PRINTF("    0x%08X - attn_bits\n",
9367 		sblk->status_attn_bits);
9368 
9369 	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
9370 		sblk->status_attn_bits_ack);
9371 
9372 	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
9373 		sblk->status_rx_quick_consumer_index0,
9374 		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
9375 
9376 	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
9377 		sblk->status_tx_quick_consumer_index0,
9378 		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
9379 
9380 	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
9381 
9382 	/* These indices are not used for normal L2 drivers. */
9383 	if (sblk->status_rx_quick_consumer_index1)
9384 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
9385 			sblk->status_rx_quick_consumer_index1,
9386 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
9387 
9388 	if (sblk->status_tx_quick_consumer_index1)
9389 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
9390 			sblk->status_tx_quick_consumer_index1,
9391 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
9392 
9393 	if (sblk->status_rx_quick_consumer_index2)
9394 		BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n",
9395 			sblk->status_rx_quick_consumer_index2,
9396 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
9397 
9398 	if (sblk->status_tx_quick_consumer_index2)
9399 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
9400 			sblk->status_tx_quick_consumer_index2,
9401 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
9402 
9403 	if (sblk->status_rx_quick_consumer_index3)
9404 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
9405 			sblk->status_rx_quick_consumer_index3,
9406 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
9407 
9408 	if (sblk->status_tx_quick_consumer_index3)
9409 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
9410 			sblk->status_tx_quick_consumer_index3,
9411 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
9412 
9413 	if (sblk->status_rx_quick_consumer_index4 ||
9414 		sblk->status_rx_quick_consumer_index5)
9415 		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
9416 			sblk->status_rx_quick_consumer_index4,
9417 			sblk->status_rx_quick_consumer_index5);
9418 
9419 	if (sblk->status_rx_quick_consumer_index6 ||
9420 		sblk->status_rx_quick_consumer_index7)
9421 		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
9422 			sblk->status_rx_quick_consumer_index6,
9423 			sblk->status_rx_quick_consumer_index7);
9424 
9425 	if (sblk->status_rx_quick_consumer_index8 ||
9426 		sblk->status_rx_quick_consumer_index9)
9427 		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
9428 			sblk->status_rx_quick_consumer_index8,
9429 			sblk->status_rx_quick_consumer_index9);
9430 
9431 	if (sblk->status_rx_quick_consumer_index10 ||
9432 		sblk->status_rx_quick_consumer_index11)
9433 		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
9434 			sblk->status_rx_quick_consumer_index10,
9435 			sblk->status_rx_quick_consumer_index11);
9436 
9437 	if (sblk->status_rx_quick_consumer_index12 ||
9438 		sblk->status_rx_quick_consumer_index13)
9439 		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
9440 			sblk->status_rx_quick_consumer_index12,
9441 			sblk->status_rx_quick_consumer_index13);
9442 
9443 	if (sblk->status_rx_quick_consumer_index14 ||
9444 		sblk->status_rx_quick_consumer_index15)
9445 		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
9446 			sblk->status_rx_quick_consumer_index14,
9447 			sblk->status_rx_quick_consumer_index15);
9448 
9449 	if (sblk->status_completion_producer_index ||
9450 		sblk->status_cmd_consumer_index)
9451 		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
9452 			sblk->status_completion_producer_index,
9453 			sblk->status_cmd_consumer_index);
9454 
9455 	BCE_PRINTF(
9456 		"----------------------------"
9457 		"----------------"
9458 		"----------------------------\n");
9459 }
9460 
9461 
9462 /****************************************************************************/
9463 /* Prints out the statistics block from host memory.                        */
9464 /*                                                                          */
9465 /* Returns:                                                                 */
9466 /*   Nothing.                                                               */
9467 /****************************************************************************/
9468 static __attribute__ ((noinline)) void
9469 bce_dump_stats_block(struct bce_softc *sc)
9470 {
9471 	struct statistics_block *sblk;
9472 
9473 	sblk = sc->stats_block;
9474 
9475 	BCE_PRINTF(
9476 		"---------------"
9477 		" Stats Block  (All Stats Not Shown Are 0) "
9478 		"---------------\n");
9479 
9480 	if (sblk->stat_IfHCInOctets_hi
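	/*
	 * The IfHC octet and packet counters are 64-bit values maintained as
	 * hi:lo pairs of 32-bit words, hence the 0x%08X:%08X display format.
	 */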
9481 		|| sblk->stat_IfHCInOctets_lo)
9482 		BCE_PRINTF("0x%08X:%08X : "
9483 			"IfHcInOctets\n",
9484 			sblk->stat_IfHCInOctets_hi,
9485 			sblk->stat_IfHCInOctets_lo);
9486 
9487 	if (sblk->stat_IfHCInBadOctets_hi
9488 		|| sblk->stat_IfHCInBadOctets_lo)
9489 		BCE_PRINTF("0x%08X:%08X : "
9490 			"IfHcInBadOctets\n",
9491 			sblk->stat_IfHCInBadOctets_hi,
9492 			sblk->stat_IfHCInBadOctets_lo);
9493 
9494 	if (sblk->stat_IfHCOutOctets_hi
9495 		|| sblk->stat_IfHCOutOctets_lo)
9496 		BCE_PRINTF("0x%08X:%08X : "
9497 			"IfHcOutOctets\n",
9498 			sblk->stat_IfHCOutOctets_hi,
9499 			sblk->stat_IfHCOutOctets_lo);
9500 
9501 	if (sblk->stat_IfHCOutBadOctets_hi
9502 		|| sblk->stat_IfHCOutBadOctets_lo)
9503 		BCE_PRINTF("0x%08X:%08X : "
9504 			"IfHcOutBadOctets\n",
9505 			sblk->stat_IfHCOutBadOctets_hi,
9506 			sblk->stat_IfHCOutBadOctets_lo);
9507 
9508 	if (sblk->stat_IfHCInUcastPkts_hi
9509 		|| sblk->stat_IfHCInUcastPkts_lo)
9510 		BCE_PRINTF("0x%08X:%08X : "
9511 			"IfHcInUcastPkts\n",
9512 			sblk->stat_IfHCInUcastPkts_hi,
9513 			sblk->stat_IfHCInUcastPkts_lo);
9514 
9515 	if (sblk->stat_IfHCInBroadcastPkts_hi
9516 		|| sblk->stat_IfHCInBroadcastPkts_lo)
9517 		BCE_PRINTF("0x%08X:%08X : "
9518 			"IfHcInBroadcastPkts\n",
9519 			sblk->stat_IfHCInBroadcastPkts_hi,
9520 			sblk->stat_IfHCInBroadcastPkts_lo);
9521 
9522 	if (sblk->stat_IfHCInMulticastPkts_hi
9523 		|| sblk->stat_IfHCInMulticastPkts_lo)
9524 		BCE_PRINTF("0x%08X:%08X : "
9525 			"IfHcInMulticastPkts\n",
9526 			sblk->stat_IfHCInMulticastPkts_hi,
9527 			sblk->stat_IfHCInMulticastPkts_lo);
9528 
9529 	if (sblk->stat_IfHCOutUcastPkts_hi
9530 		|| sblk->stat_IfHCOutUcastPkts_lo)
9531 		BCE_PRINTF("0x%08X:%08X : "
9532 			"IfHcOutUcastPkts\n",
9533 			sblk->stat_IfHCOutUcastPkts_hi,
9534 			sblk->stat_IfHCOutUcastPkts_lo);
9535 
9536 	if (sblk->stat_IfHCOutBroadcastPkts_hi
9537 		|| sblk->stat_IfHCOutBroadcastPkts_lo)
9538 		BCE_PRINTF("0x%08X:%08X : "
9539 			"IfHcOutBroadcastPkts\n",
9540 			sblk->stat_IfHCOutBroadcastPkts_hi,
9541 			sblk->stat_IfHCOutBroadcastPkts_lo);
9542 
9543 	if (sblk->stat_IfHCOutMulticastPkts_hi
9544 		|| sblk->stat_IfHCOutMulticastPkts_lo)
9545 		BCE_PRINTF("0x%08X:%08X : "
9546 			"IfHcOutMulticastPkts\n",
9547 			sblk->stat_IfHCOutMulticastPkts_hi,
9548 			sblk->stat_IfHCOutMulticastPkts_lo);
9549 
9550 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
9551 		BCE_PRINTF("         0x%08X : "
9552 			"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
9553 			sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
9554 
9555 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
9556 		BCE_PRINTF("         0x%08X : Dot3StatsCarrierSenseErrors\n",
9557 			sblk->stat_Dot3StatsCarrierSenseErrors);
9558 
9559 	if (sblk->stat_Dot3StatsFCSErrors)
9560 		BCE_PRINTF("         0x%08X : Dot3StatsFCSErrors\n",
9561 			sblk->stat_Dot3StatsFCSErrors);
9562 
9563 	if (sblk->stat_Dot3StatsAlignmentErrors)
9564 		BCE_PRINTF("         0x%08X : Dot3StatsAlignmentErrors\n",
9565 			sblk->stat_Dot3StatsAlignmentErrors);
9566 
9567 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
9568 		BCE_PRINTF("         0x%08X : Dot3StatsSingleCollisionFrames\n",
9569 			sblk->stat_Dot3StatsSingleCollisionFrames);
9570 
9571 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
9572 		BCE_PRINTF("         0x%08X : Dot3StatsMultipleCollisionFrames\n",
9573 			sblk->stat_Dot3StatsMultipleCollisionFrames);
9574 
9575 	if (sblk->stat_Dot3StatsDeferredTransmissions)
9576 		BCE_PRINTF("         0x%08X : Dot3StatsDeferredTransmissions\n",
9577 			sblk->stat_Dot3StatsDeferredTransmissions);
9578 
9579 	if (sblk->stat_Dot3StatsExcessiveCollisions)
9580 		BCE_PRINTF("         0x%08X : Dot3StatsExcessiveCollisions\n",
9581 			sblk->stat_Dot3StatsExcessiveCollisions);
9582 
9583 	if (sblk->stat_Dot3StatsLateCollisions)
9584 		BCE_PRINTF("         0x%08X : Dot3StatsLateCollisions\n",
9585 			sblk->stat_Dot3StatsLateCollisions);
9586 
9587 	if (sblk->stat_EtherStatsCollisions)
9588 		BCE_PRINTF("         0x%08X : EtherStatsCollisions\n",
9589 			sblk->stat_EtherStatsCollisions);
9590 
9591 	if (sblk->stat_EtherStatsFragments)
9592 		BCE_PRINTF("         0x%08X : EtherStatsFragments\n",
9593 			sblk->stat_EtherStatsFragments);
9594 
9595 	if (sblk->stat_EtherStatsJabbers)
9596 		BCE_PRINTF("         0x%08X : EtherStatsJabbers\n",
9597 			sblk->stat_EtherStatsJabbers);
9598 
9599 	if (sblk->stat_EtherStatsUndersizePkts)
9600 		BCE_PRINTF("         0x%08X : EtherStatsUndersizePkts\n",
9601 			sblk->stat_EtherStatsUndersizePkts);
9602 
9603 	if (sblk->stat_EtherStatsOversizePkts)
9604 		BCE_PRINTF("         0x%08X : EtherStatsOversizePkts\n",
9605 			sblk->stat_EtherStatsOversizePkts);
9606 
9607 	if (sblk->stat_EtherStatsPktsRx64Octets)
9608 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx64Octets\n",
9609 			sblk->stat_EtherStatsPktsRx64Octets);
9610 
9611 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
9612 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
9613 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
9614 
9615 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
9616 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
9617 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
9618 
9619 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
9620 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
9621 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
9622 
9623 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
9624 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
9625 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
9626 
9627 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
9628 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
9629 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
9630 
9631 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
9632 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
9633 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
9634 
9635 	if (sblk->stat_EtherStatsPktsTx64Octets)
9636 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx64Octets\n",
9637 			sblk->stat_EtherStatsPktsTx64Octets);
9638 
9639 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
9640 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
9641 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
9642 
9643 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
9644 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
9645 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
9646 
9647 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
9648 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
9649 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
9650 
9651 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
9652 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
9653 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
9654 
9655 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
9656 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
9657 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
9658 
9659 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
9660 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
9661 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
9662 
9663 	if (sblk->stat_XonPauseFramesReceived)
9664 		BCE_PRINTF("         0x%08X : XonPauseFramesReceived\n",
9665 			sblk->stat_XonPauseFramesReceived);
9666 
9667 	if (sblk->stat_XoffPauseFramesReceived)
9668 		BCE_PRINTF("         0x%08X : XoffPauseFramesReceived\n",
9669 			sblk->stat_XoffPauseFramesReceived);
9670 
9671 	if (sblk->stat_OutXonSent)
9672 		BCE_PRINTF("         0x%08X : OutXonSent\n",
9673 			sblk->stat_OutXonSent);
9674 
9675 	if (sblk->stat_OutXoffSent)
9676 		BCE_PRINTF("         0x%08X : OutXoffSent\n",
9677 			sblk->stat_OutXoffSent);
9678 
9679 	if (sblk->stat_FlowControlDone)
9680 		BCE_PRINTF("         0x%08X : FlowControlDone\n",
9681 			sblk->stat_FlowControlDone);
9682 
9683 	if (sblk->stat_MacControlFramesReceived)
9684 		BCE_PRINTF("         0x%08X : MacControlFramesReceived\n",
9685 			sblk->stat_MacControlFramesReceived);
9686 
9687 	if (sblk->stat_XoffStateEntered)
9688 		BCE_PRINTF("         0x%08X : XoffStateEntered\n",
9689 			sblk->stat_XoffStateEntered);
9690 
9691 	if (sblk->stat_IfInFramesL2FilterDiscards)
9692 		BCE_PRINTF("         0x%08X : IfInFramesL2FilterDiscards\n",
9693 			sblk->stat_IfInFramesL2FilterDiscards);
9694 
9695 	if (sblk->stat_IfInRuleCheckerDiscards)
9696 		BCE_PRINTF("         0x%08X : IfInRuleCheckerDiscards\n",
9697 			sblk->stat_IfInRuleCheckerDiscards);
9698 
9699 	if (sblk->stat_IfInFTQDiscards)
9700 		BCE_PRINTF("         0x%08X : IfInFTQDiscards\n",
9701 			sblk->stat_IfInFTQDiscards);
9702 
9703 	if (sblk->stat_IfInMBUFDiscards)
9704 		BCE_PRINTF("         0x%08X : IfInMBUFDiscards\n",
9705 			sblk->stat_IfInMBUFDiscards);
9706 
9707 	if (sblk->stat_IfInRuleCheckerP4Hit)
9708 		BCE_PRINTF("         0x%08X : IfInRuleCheckerP4Hit\n",
9709 			sblk->stat_IfInRuleCheckerP4Hit);
9710 
9711 	if (sblk->stat_CatchupInRuleCheckerDiscards)
9712 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerDiscards\n",
9713 			sblk->stat_CatchupInRuleCheckerDiscards);
9714 
9715 	if (sblk->stat_CatchupInFTQDiscards)
9716 		BCE_PRINTF("         0x%08X : CatchupInFTQDiscards\n",
9717 			sblk->stat_CatchupInFTQDiscards);
9718 
9719 	if (sblk->stat_CatchupInMBUFDiscards)
9720 		BCE_PRINTF("         0x%08X : CatchupInMBUFDiscards\n",
9721 			sblk->stat_CatchupInMBUFDiscards);
9722 
9723 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
9724 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerP4Hit\n",
9725 			sblk->stat_CatchupInRuleCheckerP4Hit);
9726 
9727 	BCE_PRINTF(
9728 		"----------------------------"
9729 		"----------------"
9730 		"----------------------------\n");
9731 }
9732 
9733 
9734 /****************************************************************************/
9735 /* Prints out a summary of the driver state.                                */
9736 /*                                                                          */
9737 /* Returns:                                                                 */
9738 /*   Nothing.                                                               */
9739 /****************************************************************************/
9740 static __attribute__ ((noinline)) void
9741 bce_dump_driver_state(struct bce_softc *sc)
9742 {
9743 	u32 val_hi, val_lo;
9744 
9745 	BCE_PRINTF(
9746 		"-----------------------------"
9747 		" Driver State "
9748 		"-----------------------------\n");
9749 
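	/*
	 * BCE_ADDR_HI()/BCE_ADDR_LO() split a (possibly 64-bit) address into
	 * its upper and lower 32 bits so it can be printed as two words.
	 */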
9750 	val_hi = BCE_ADDR_HI(sc);
9751 	val_lo = BCE_ADDR_LO(sc);
9752 	BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
9753 		val_hi, val_lo);
9754 
9755 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
9756 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
9757 	BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
9758 		val_hi, val_lo);
9759 
9760 	val_hi = BCE_ADDR_HI(sc->status_block);
9761 	val_lo = BCE_ADDR_LO(sc->status_block);
9762 	BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
9763 		val_hi, val_lo);
9764 
9765 	val_hi = BCE_ADDR_HI(sc->stats_block);
9766 	val_lo = BCE_ADDR_LO(sc->stats_block);
9767 	BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
9768 		val_hi, val_lo);
9769 
9770 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
9771 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
9772 	BCE_PRINTF(
9773 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual address\n",
9774 		val_hi, val_lo);
9775 
9776 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
9777 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
9778 	BCE_PRINTF(
9779 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
9780 		val_hi, val_lo);
9781 
9782 #ifdef BCE_JUMBO_HDRSPLIT
9783 	val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
9784 	val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
9785 	BCE_PRINTF(
9786 		"0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
9787 		val_hi, val_lo);
9788 #endif
9789 
9790 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
9791 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
9792 	BCE_PRINTF(
9793 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
9794 		val_hi, val_lo);
9795 
9796 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
9797 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
9798 	BCE_PRINTF(
9799 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
9800 		val_hi, val_lo);
9801 
9802 #ifdef BCE_JUMBO_HDRSPLIT
9803 	val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
9804 	val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
9805 	BCE_PRINTF(
9806 		"0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
9807 		val_hi, val_lo);
9808 #endif
9809 
9810 	BCE_PRINTF("         0x%08X - (sc->interrupts_generated) h/w intrs\n",
9811 		sc->interrupts_generated);
9812 
9813 	BCE_PRINTF("         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
9814 		sc->rx_interrupts);
9815 
9816 	BCE_PRINTF("         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
9817 		sc->tx_interrupts);
9818 
9819 	BCE_PRINTF("         0x%08X - (sc->last_status_idx) status block index\n",
9820 		sc->last_status_idx);
9821 
9822 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
9823 		sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
9824 
9825 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
9826 		sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
9827 
9828 	BCE_PRINTF("         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
9829 		sc->tx_prod_bseq);
9830 
9831 	BCE_PRINTF("         0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
9832 		sc->debug_tx_mbuf_alloc);
9833 
9834 	BCE_PRINTF("         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
9835 		sc->used_tx_bd);
9836 
9837 	BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
9838 		sc->tx_hi_watermark, sc->max_tx_bd);
9839 
9840 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
9841 		sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
9842 
9843 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
9844 		sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
9845 
9846 	BCE_PRINTF("         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
9847 		sc->rx_prod_bseq);
9848 
9849 	BCE_PRINTF("         0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
9850 		sc->debug_rx_mbuf_alloc);
9851 
9852 	BCE_PRINTF("         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
9853 		sc->free_rx_bd);
9854 
9855 #ifdef BCE_JUMBO_HDRSPLIT
9856 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
9857 		sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
9858 
9859 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
9860 		sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
9861 
9862 	BCE_PRINTF("         0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
9863 		sc->debug_pg_mbuf_alloc);
9864 
9865 	BCE_PRINTF("         0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
9866 		sc->free_pg_bd);
9867 
9868 	BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
9869 		sc->pg_low_watermark, sc->max_pg_bd);
9870 #endif
9871 
9872 	BCE_PRINTF("         0x%08X - (sc->mbuf_alloc_failed_count) "
9873 		"mbuf alloc failures\n",
9874 		sc->mbuf_alloc_failed_count);
9875 
9876 	BCE_PRINTF("         0x%08X - (sc->bce_flags) bce mac flags\n",
9877 		sc->bce_flags);
9878 
9879 	BCE_PRINTF("         0x%08X - (sc->bce_phy_flags) bce phy flags\n",
9880 		sc->bce_phy_flags);
9881 
9882 	BCE_PRINTF(
9883 		"----------------------------"
9884 		"----------------"
9885 		"----------------------------\n");
9886 }
9887 
9888 
9889 /****************************************************************************/
9890 /* Prints out the hardware state through a summary of important registers,  */
9891 /* followed by a complete register dump.                                    */
9892 /*                                                                          */
9893 /* Returns:                                                                 */
9894 /*   Nothing.                                                               */
9895 /****************************************************************************/
9896 static __attribute__ ((noinline)) void
9897 bce_dump_hw_state(struct bce_softc *sc)
9898 {
9899 	u32 val;
9900 
9901 	BCE_PRINTF(
9902 		"----------------------------"
9903 		" Hardware State "
9904 		"----------------------------\n");
9905 
9906 	BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
9907 
9908 	val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
9909 	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
9910 		val, BCE_MISC_ENABLE_STATUS_BITS);
9911 
9912 	val = REG_RD(sc, BCE_DMA_STATUS);
9913 	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
9914 
9915 	val = REG_RD(sc, BCE_CTX_STATUS);
9916 	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
9917 
9918 	val = REG_RD(sc, BCE_EMAC_STATUS);
9919 	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
9920 
9921 	val = REG_RD(sc, BCE_RPM_STATUS);
9922 	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
9923 
9924 	val = REG_RD(sc, 0x2004);
9925 	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
9926 
9927 	val = REG_RD(sc, BCE_RV2P_STATUS);
9928 	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
9929 
9930 	val = REG_RD(sc, 0x2c04);
9931 	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
9932 
9933 	val = REG_RD(sc, BCE_TBDR_STATUS);
9934 	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
9935 
9936 	val = REG_RD(sc, BCE_TDMA_STATUS);
9937 	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
9938 
9939 	val = REG_RD(sc, BCE_HC_STATUS);
9940 	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
9941 
9942 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
9943 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
9944 
9945 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
9946 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
9947 
9948 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
9949 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
9950 
9951 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
9952 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
9953 
9954 	val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
9955 	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
9956 
9957 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
9958 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
9959 
9960 	BCE_PRINTF(
9961 		"----------------------------"
9962 		"----------------"
9963 		"----------------------------\n");
9964 
9965 	BCE_PRINTF(
9966 		"----------------------------"
9967 		" Register  Dump "
9968 		"----------------------------\n");
9969 
9970 	for (int i = 0x400; i < 0x8000; i += 0x10) {
9971 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9972 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
9973 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
9974 	}
9975 
9976 	BCE_PRINTF(
9977 		"----------------------------"
9978 		"----------------"
9979 		"----------------------------\n");
9980 }
9981 
9982 
9983 /****************************************************************************/
9984 /* Prints out the mailbox queue registers.                                  */
9985 /*                                                                          */
9986 /* Returns:                                                                 */
9987 /*   Nothing.                                                               */
9988 /****************************************************************************/
9989 static __attribute__ ((noinline)) void
9990 bce_dump_mq_regs(struct bce_softc *sc)
9991 {
9992 	BCE_PRINTF(
9993 		"----------------------------"
9994 		"    MQ Regs     "
9995 		"----------------------------\n");
9996 
9997 	BCE_PRINTF(
9998 		"----------------------------"
9999 		"----------------"
10000 		"----------------------------\n");
10001 
10002 	for (int i = 0x3c00; i < 0x4000; i += 0x10) {
10003 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10004 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10005 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10006 	}
10007 
10008 	BCE_PRINTF(
10009 		"----------------------------"
10010 		"----------------"
10011 		"----------------------------\n");
10012 }
10013 
10014 
10015 /****************************************************************************/
10016 /* Prints out the bootcode state.                                           */
10017 /*                                                                          */
10018 /* Returns:                                                                 */
10019 /*   Nothing.                                                               */
10020 /****************************************************************************/
10021 static __attribute__ ((noinline)) void
10022 bce_dump_bc_state(struct bce_softc *sc)
10023 {
10024 	u32 val;
10025 
10026 	BCE_PRINTF(
10027 		"----------------------------"
10028 		" Bootcode State "
10029 		"----------------------------\n");
10030 
10031 	BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10032 
10033 	val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE);
10034 	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
10035 		val, BCE_BC_RESET_TYPE);
10036 
10037 	val = bce_shmem_rd(sc, BCE_BC_STATE);
10038 	BCE_PRINTF("0x%08X - (0x%06X) state\n",
10039 		val, BCE_BC_STATE);
10040 
10041 	val = bce_shmem_rd(sc, BCE_BC_CONDITION);
10042 	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
10043 		val, BCE_BC_CONDITION);
10044 
10045 	val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD);
10046 	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
10047 		val, BCE_BC_STATE_DEBUG_CMD);
10048 
10049 	BCE_PRINTF(
10050 		"----------------------------"
10051 		"----------------"
10052 		"----------------------------\n");
10053 }
10054 
10055 
10056 /****************************************************************************/
10057 /* Prints out the TXP processor state.                                      */
10058 /*                                                                          */
10059 /* Returns:                                                                 */
10060 /*   Nothing.                                                               */
10061 /****************************************************************************/
10062 static __attribute__ ((noinline)) void
10063 bce_dump_txp_state(struct bce_softc *sc, int regs)
10064 {
10065 	u32 val;
10066 	u32 fw_version[3];
10067 
10068 	BCE_PRINTF(
10069 		"----------------------------"
10070 		"   TXP  State   "
10071 		"----------------------------\n");
10072 
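	/*
	 * The firmware version is stored as a string in the 12 bytes read
	 * below from the processor scratch area; printing it with %s assumes
	 * it is NUL-terminated within that window.  The same pattern is used
	 * by the other CPU dump routines that follow.
	 */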
10073 	for (int i = 0; i < 3; i++)
10074 		fw_version[i] = htonl(REG_RD_IND(sc,
10075 			(BCE_TXP_SCRATCH + 0x10 + i * 4)));
10076 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10077 
10078 	val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
10079 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val, BCE_TXP_CPU_MODE);
10080 
10081 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10082 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
10083 
10084 	val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
10085 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val,
10086 		BCE_TXP_CPU_EVENT_MASK);
10087 
10088 	if (regs) {
10089 		BCE_PRINTF(
10090 			"----------------------------"
10091 			" Register  Dump "
10092 			"----------------------------\n");
10093 
10094 		for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
10095 			/* Skip the big blank spaces */
10096 			if (i < 0x45400 || i > 0x5ffff)
10097 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10098 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10099 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10100 		}
10101 	}
10102 
10103 	BCE_PRINTF(
10104 		"----------------------------"
10105 		"----------------"
10106 		"----------------------------\n");
10107 }
10108 
10109 
10110 /****************************************************************************/
10111 /* Prints out the RXP processor state.                                      */
10112 /*                                                                          */
10113 /* Returns:                                                                 */
10114 /*   Nothing.                                                               */
10115 /****************************************************************************/
10116 static __attribute__ ((noinline)) void
10117 bce_dump_rxp_state(struct bce_softc *sc, int regs)
10118 {
10119 	u32 val;
10120 	u32 fw_version[3];
10121 
10122 	BCE_PRINTF(
10123 		"----------------------------"
10124 		"   RXP  State   "
10125 		"----------------------------\n");
10126 
10127 	for (int i = 0; i < 3; i++)
10128 		fw_version[i] = htonl(REG_RD_IND(sc,
10129 			(BCE_RXP_SCRATCH + 0x10 + i * 4)));
10130 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10131 
10132 	val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
10133 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val, BCE_RXP_CPU_MODE);
10134 
10135 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10136 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
10137 
10138 	val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
10139 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val,
10140 		BCE_RXP_CPU_EVENT_MASK);
10141 
10142 	if (regs) {
10143 		BCE_PRINTF(
10144 			"----------------------------"
10145 			" Register  Dump "
10146 			"----------------------------\n");
10147 
10148 		for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
10149 			/* Skip the big blank spaces */
10150 			if (i < 0xc5400 || i > 0xdffff)
10151 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10152 	 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10153 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10154 		}
10155 	}
10156 
10157 	BCE_PRINTF(
10158 		"----------------------------"
10159 		"----------------"
10160 		"----------------------------\n");
10161 }
10162 
10163 
10164 /****************************************************************************/
10165 /* Prints out the TPAT processor state.                                     */
10166 /*                                                                          */
10167 /* Returns:                                                                 */
10168 /*   Nothing.                                                               */
10169 /****************************************************************************/
10170 static __attribute__ ((noinline)) void
10171 bce_dump_tpat_state(struct bce_softc *sc, int regs)
10172 {
10173 	u32 val;
10174 	u32 fw_version[3];
10175 
10176 	BCE_PRINTF(
10177 		"----------------------------"
10178 		"   TPAT State   "
10179 		"----------------------------\n");
10180 
10181 	for (int i = 0; i < 3; i++)
10182 		fw_version[i] = htonl(REG_RD_IND(sc,
10183 			(BCE_TPAT_SCRATCH + 0x410 + i * 4)));
10184 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10185 
10186 	val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
10187 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val, BCE_TPAT_CPU_MODE);
10188 
10189 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10190 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
10191 
10192 	val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
10193 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val,
10194 		BCE_TPAT_CPU_EVENT_MASK);
10195 
10196 	if (regs) {
10197 		BCE_PRINTF(
10198 			"----------------------------"
10199 			" Register  Dump "
10200 			"----------------------------\n");
10201 
10202 		for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
10203 			/* Skip the big blank spaces */
10204 			if (i < 0x85400 || i > 0x9ffff)
10205 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10206 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10207 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10208 		}
10209 	}
10210 
10211 	BCE_PRINTF(
10212 		"----------------------------"
10213 		"----------------"
10214 		"----------------------------\n");
10215 }
10216 
10217 
10218 /****************************************************************************/
10219 /* Prints out the Command Processor (CP) state.                             */
10220 /*                                                                          */
10221 /* Returns:                                                                 */
10222 /*   Nothing.                                                               */
10223 /****************************************************************************/
10224 static __attribute__ ((noinline)) void
10225 bce_dump_cp_state(struct bce_softc *sc, int regs)
10226 {
10227 	u32 val;
10228 	u32 fw_version[3];
10229 
10230 	BCE_PRINTF(
10231 		"----------------------------"
10232 		"    CP State    "
10233 		"----------------------------\n");
10234 
10235 	for (int i = 0; i < 3; i++)
10236 		fw_version[i] = htonl(REG_RD_IND(sc,
10237 			(BCE_CP_SCRATCH + 0x10 + i * 4)));
10238 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10239 
10240 	val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
10241 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", val, BCE_CP_CPU_MODE);
10242 
10243 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10244 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
10245 
10246 	val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
10247 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
10248 		BCE_CP_CPU_EVENT_MASK);
10249 
10250 	if (regs) {
10251 		BCE_PRINTF(
10252 			"----------------------------"
10253 			" Register  Dump "
10254 			"----------------------------\n");
10255 
10256 		for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
10257 			/* Skip the big blank spaces */
10258 			if (i < 0x185400 || i > 0x19ffff)
10259 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10260 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10261 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10262 		}
10263 	}
10264 
10265 	BCE_PRINTF(
10266 		"----------------------------"
10267 		"----------------"
10268 		"----------------------------\n");
10269 }
10270 
10271 
10272 /****************************************************************************/
10273 /* Prints out the Completion Processor (COM) state.                         */
10274 /*                                                                          */
10275 /* Returns:                                                                 */
10276 /*   Nothing.                                                               */
10277 /****************************************************************************/
10278 static __attribute__ ((noinline)) void
10279 bce_dump_com_state(struct bce_softc *sc, int regs)
10280 {
10281 	u32 val;
10282 	u32 fw_version[3];
10283 
10284 	BCE_PRINTF(
10285 		"----------------------------"
10286 		"   COM State    "
10287 		"----------------------------\n");
10288 
10289 	for (int i = 0; i < 3; i++)
10290 		fw_version[i] = htonl(REG_RD_IND(sc,
10291 			(BCE_COM_SCRATCH + 0x10 + i * 4)));
10292 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10293 
10294 	val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
10295 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", val, BCE_COM_CPU_MODE);
10296 
10297 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10298 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
10299 
10300 	val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
10301 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
10302 		BCE_COM_CPU_EVENT_MASK);
10303 
10304 	if (regs) {
10305 		BCE_PRINTF(
10306 			"----------------------------"
10307 			" Register  Dump "
10308 			"----------------------------\n");
10309 
10310 		for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
10311 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10312 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10313 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10314 		}
10315 	}
10316 
10317 	BCE_PRINTF(
10318 		"----------------------------"
10319 		"----------------"
10320 		"----------------------------\n");
10321 }
10322 
10323 
10324 /****************************************************************************/
10325 /* Prints out the driver state and then enters the debugger.                */
10326 /*                                                                          */
10327 /* Returns:                                                                 */
10328 /*   Nothing.                                                               */
10329 /****************************************************************************/
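/*
 * Note: bce_breakpoint() and the dump routines above are compiled in only
 * when the driver is built with BCE_DEBUG (see the closing #endif below),
 * so they add no overhead to production builds.
 */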
10330 static void
10331 bce_breakpoint(struct bce_softc *sc)
10332 {
10333 
10334 	/*
10335 	 * Unreachable code to silence compiler warnings
10336 	 * about unused functions.
10337 	 */
10338 	if (0) {
10339 		bce_freeze_controller(sc);
10340 		bce_unfreeze_controller(sc);
10341 		bce_dump_enet(sc, NULL);
10342 		bce_dump_txbd(sc, 0, NULL);
10343 		bce_dump_rxbd(sc, 0, NULL);
10344 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
10345 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
10346 		bce_dump_l2fhdr(sc, 0, NULL);
10347 		bce_dump_ctx(sc, RX_CID);
10348 		bce_dump_ftqs(sc);
10349 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
10350 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
10351 		bce_dump_status_block(sc);
10352 		bce_dump_stats_block(sc);
10353 		bce_dump_driver_state(sc);
10354 		bce_dump_hw_state(sc);
10355 		bce_dump_bc_state(sc);
10356 		bce_dump_txp_state(sc, 0);
10357 		bce_dump_rxp_state(sc, 0);
10358 		bce_dump_tpat_state(sc, 0);
10359 		bce_dump_cp_state(sc, 0);
10360 		bce_dump_com_state(sc, 0);
10361 #ifdef BCE_JUMBO_HDRSPLIT
10362 		bce_dump_pgbd(sc, 0, NULL);
10363 		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
10364 		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
10365 #endif
10366 	}
10367 
10368 	bce_dump_status_block(sc);
10369 	bce_dump_driver_state(sc);
10370 
10371 	/* Call the debugger. */
10372 	breakpoint();
10373 
10374 	return;
10375 }
10376 #endif
10377 
10378