xref: /freebsd/sys/dev/bce/if_bce.c (revision 0efd6615cd5f39b67cec82a7034e655f3b5801e3)
1 /*-
2  * Copyright (c) 2006 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1
38  *
39  * The following controllers are not supported by this driver:
40  * (These are not "Production" versions of the controller.)
41  *
42  *   BCM5706C A0, A1
43  *   BCM5706S A0, A1, A2, A3
44  *   BCM5708C A0, B0
45  *   BCM5708S A0, B0, B1
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Driver Version                                                       */
55 /****************************************************************************/
/* Driver version string; reported in the device description by bce_probe(). */
char bce_driver_version[] = "v0.9.6";
57 
58 
59 /****************************************************************************/
60 /* BCE Debug Options                                                        */
61 /****************************************************************************/
#ifdef BCE_DEBUG
	/* Run-time debug message level; defaults to warnings only. */
	u32 bce_debug = BCE_WARN;

	/*
	 * The bce_debug_* knobs below inject simulated failures for driver
	 * testing.  The table maps a knob value to its approximate failure
	 * rate (presumably compared against a random draw elsewhere in the
	 * driver -- confirm against the DBRUN*/ /* macros in if_bcereg.h):
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 = 1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bce_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bce_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bce_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bce_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bce_debug_bootcode_running_failure = 0;
#endif
90 
91 
92 /****************************************************************************/
93 /* PCI Device ID Table                                                      */
94 /*                                                                          */
95 /* Used by bce_probe() to identify the devices supported by this driver.    */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX		64
98 
99 static struct bce_type bce_devs[] = {
100 	/* BCM5706C Controllers and OEM boards. */
101 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
102 		"HP NC370T Multifunction Gigabit Server Adapter" },
103 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
104 		"HP NC370i Multifunction Gigabit Server Adapter" },
105 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
106 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
107 
108 	/* BCM5706S controllers and OEM boards. */
109 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
110 		"HP NC370F Multifunction Gigabit Server Adapter" },
111 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
112 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 
114 	/* BCM5708C controllers and OEM boards. */
115 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
116 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
117 
118 	/* BCM5708S controllers and OEM boards. */
119 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
120 		"Broadcom NetXtreme II BCM5708S 1000Base-T" },
121 	{ 0, 0, 0, 0, NULL }
122 };
123 
124 
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data.                                       */
127 /****************************************************************************/
/*
 * NOTE(review): each initializer supplies five 32-bit command/config words,
 * a buffered-flash flag (0/1), the page bits, page size, byte address mask,
 * total device size, and a printable name.  The precise meaning of each
 * word is defined by struct flash_spec in if_bcereg.h -- confirm there
 * before editing any of these values.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
214 
215 
216 /****************************************************************************/
217 /* FreeBSD device entry points.                                             */
218 /****************************************************************************/
219 static int  bce_probe				(device_t);
220 static int  bce_attach				(device_t);
221 static int  bce_detach				(device_t);
222 static void bce_shutdown			(device_t);
223 
224 
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines                                   */
227 /****************************************************************************/
228 #ifdef BCE_DEBUG
229 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, int, int);
232 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain		(struct bce_softc *, int, int);
236 static void bce_dump_rx_chain		(struct bce_softc *, int, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_breakpoint			(struct bce_softc *);
242 #endif
243 
244 
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines                                      */
247 /****************************************************************************/
248 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
249 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
250 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
251 static int  bce_miibus_read_reg		(device_t, int, int);
252 static int  bce_miibus_write_reg	(device_t, int, int, int);
253 static void bce_miibus_statchg		(device_t);
254 
255 
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines                                                */
258 /****************************************************************************/
259 static int  bce_acquire_nvram_lock	(struct bce_softc *);
260 static int  bce_release_nvram_lock	(struct bce_softc *);
261 static void bce_enable_nvram_access	(struct bce_softc *);
262 static void	bce_disable_nvram_access(struct bce_softc *);
263 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
264 static int  bce_init_nvram			(struct bce_softc *);
265 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
266 static int  bce_nvram_test			(struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int  bce_enable_nvram_write	(struct bce_softc *);
269 static void bce_disable_nvram_write	(struct bce_softc *);
270 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
271 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
272 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
273 #endif
274 
275 /****************************************************************************/
276 /*                                                                          */
277 /****************************************************************************/
278 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
279 static int  bce_dma_alloc			(device_t);
280 static void bce_dma_free			(struct bce_softc *);
281 static void bce_release_resources	(struct bce_softc *);
282 
283 /****************************************************************************/
284 /* BCE Firmware Synchronization and Load                                    */
285 /****************************************************************************/
286 static int  bce_fw_sync				(struct bce_softc *, u32);
287 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
288 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
289 static void bce_init_cpus			(struct bce_softc *);
290 
291 static void bce_stop				(struct bce_softc *);
292 static int  bce_reset				(struct bce_softc *, u32);
293 static int  bce_chipinit 			(struct bce_softc *);
294 static int  bce_blockinit 			(struct bce_softc *);
295 static int  bce_get_buf				(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
296 
297 static int  bce_init_tx_chain		(struct bce_softc *);
298 static int  bce_init_rx_chain		(struct bce_softc *);
299 static void bce_free_rx_chain		(struct bce_softc *);
300 static void bce_free_tx_chain		(struct bce_softc *);
301 
302 static int  bce_tx_encap		(struct bce_softc *, struct mbuf **);
303 static void bce_start_locked		(struct ifnet *);
304 static void bce_start				(struct ifnet *);
305 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
306 static void bce_watchdog			(struct ifnet *);
307 static int  bce_ifmedia_upd			(struct ifnet *);
308 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
309 static void bce_init_locked			(struct bce_softc *);
310 static void bce_init				(void *);
311 static void bce_mgmt_init_locked(struct bce_softc *sc);
312 
313 static void bce_init_context		(struct bce_softc *);
314 static void bce_get_mac_addr		(struct bce_softc *);
315 static void bce_set_mac_addr		(struct bce_softc *);
316 static void bce_phy_intr			(struct bce_softc *);
317 static void bce_rx_intr				(struct bce_softc *);
318 static void bce_tx_intr				(struct bce_softc *);
319 static void bce_disable_intr		(struct bce_softc *);
320 static void bce_enable_intr			(struct bce_softc *);
321 
322 #ifdef DEVICE_POLLING
323 static void bce_poll_locked			(struct ifnet *, enum poll_cmd, int);
324 static void bce_poll				(struct ifnet *, enum poll_cmd, int);
325 #endif
326 static void bce_intr				(void *);
327 static void bce_set_rx_mode			(struct bce_softc *);
328 static void bce_stats_update		(struct bce_softc *);
329 static void bce_tick_locked			(struct bce_softc *);
330 static void bce_tick				(void *);
331 static void bce_add_sysctls			(struct bce_softc *);
332 
333 
334 /****************************************************************************/
335 /* FreeBSD device dispatch table.                                           */
336 /****************************************************************************/
/* Dispatch table connecting newbus/miibus entry points to this driver. */
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	/* Terminating entry. */
	{ 0, 0 }
};

static driver_t bce_driver = {
	"bce",				/* driver name */
	bce_methods,			/* method table */
	sizeof(struct bce_softc)	/* size of per-instance softc */
};

static devclass_t bce_devclass;

/* Module dependencies: PCI bus, Ethernet framework, and the MII bus. */
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

/* Register the driver on the PCI bus and hang miibus beneath it. */
DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
370 
371 
372 /****************************************************************************/
373 /* Device probe function.                                                   */
374 /*                                                                          */
375 /* Compares the device to the driver's list of supported devices and        */
376 /* reports back to the OS whether this is the right driver for the device.  */
377 /*                                                                          */
378 /* Returns:                                                                 */
379 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
380 /****************************************************************************/
381 static int
382 bce_probe(device_t dev)
383 {
384 	struct bce_type *t;
385 	struct bce_softc *sc;
386 	char *descbuf;
387 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
388 
389 	t = bce_devs;
390 
391 	sc = device_get_softc(dev);
392 	bzero(sc, sizeof(struct bce_softc));
393 	sc->bce_unit = device_get_unit(dev);
394 	sc->bce_dev = dev;
395 
396 	/* Get the data for the device to be probed. */
397 	vid  = pci_get_vendor(dev);
398 	did  = pci_get_device(dev);
399 	svid = pci_get_subvendor(dev);
400 	sdid = pci_get_subdevice(dev);
401 
402 	DBPRINT(sc, BCE_VERBOSE_LOAD,
403 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
404 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
405 
406 	/* Look through the list of known devices for a match. */
407 	while(t->bce_name != NULL) {
408 
409 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
410 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
411 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
412 
413 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
414 
415 			if (descbuf == NULL)
416 				return(ENOMEM);
417 
418 			/* Print out the device identity. */
419 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s",
420 				t->bce_name,
421 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
422 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
423 			    bce_driver_version);
424 
425 			device_set_desc_copy(dev, descbuf);
426 			free(descbuf, M_TEMP);
427 			return(BUS_PROBE_DEFAULT);
428 		}
429 		t++;
430 	}
431 
432 	DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No IOCTL match found!\n",
433 		__FILE__, __LINE__);
434 
435 	return(ENXIO);
436 }
437 
438 
439 /****************************************************************************/
440 /* Device attach function.                                                  */
441 /*                                                                          */
442 /* Allocates device resources, performs secondary chip identification,      */
443 /* resets and initializes the hardware, and initializes driver instance     */
444 /* variables.                                                               */
445 /*                                                                          */
446 /* Returns:                                                                 */
447 /*   0 on success, positive value on failure.                               */
448 /****************************************************************************/
449 static int
450 bce_attach(device_t dev)
451 {
452 	struct bce_softc *sc;
453 	struct ifnet *ifp;
454 	u32 val;
455 	int count, mbuf, rid, rc = 0;
456 
457 	sc = device_get_softc(dev);
458 	sc->bce_dev = dev;
459 
460 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
461 
462 	mbuf = device_get_unit(dev);
463 	sc->bce_unit = mbuf;
464 
465 	pci_enable_busmaster(dev);
466 
467 	/* Allocate PCI memory resources. */
468 	rid = PCIR_BAR(0);
469 	sc->bce_res = bus_alloc_resource_any(
470 		dev,				/* dev */
471 		SYS_RES_MEMORY,			/* type */
472 		&rid,				/* rid */
473 		RF_ACTIVE | PCI_RF_DENSE);	/* flags */
474 
475 	if (sc->bce_res == NULL) {
476 		BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n",
477 			__FILE__, __LINE__);
478 		rc = ENXIO;
479 		goto bce_attach_fail;
480 	}
481 
482 	/* Get various resource handles. */
483 	sc->bce_btag    = rman_get_bustag(sc->bce_res);
484 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
485 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
486 
487 	/* Allocate PCI IRQ resources. */
488 	count = pci_msi_count(dev);
489 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
490 		rid = 1;
491 		sc->bce_flags |= BCE_USING_MSI_FLAG;
492 	} else
493 		rid = 0;
494 	sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
495 	    RF_SHAREABLE | RF_ACTIVE);
496 
497 	if (sc->bce_irq == NULL) {
498 		BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n",
499 			__FILE__, __LINE__);
500 		rc = ENXIO;
501 		goto bce_attach_fail;
502 	}
503 
504 	/* Initialize mutex for the current device instance. */
505 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
506 
507 	/*
508 	 * Configure byte swap and enable indirect register access.
509 	 * Rely on CPU to do target byte swapping on big endian systems.
510 	 * Access to registers outside of PCI configurtion space are not
511 	 * valid until this is done.
512 	 */
513 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
514 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
515 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
516 
517 	/* Save ASIC revsion info. */
518 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
519 
520 	/* Weed out any non-production controller revisions. */
521 	switch(BCE_CHIP_ID(sc)) {
522 		case BCE_CHIP_ID_5706_A0:
523 		case BCE_CHIP_ID_5706_A1:
524 		case BCE_CHIP_ID_5708_A0:
525 		case BCE_CHIP_ID_5708_B0:
526 			BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
527 				__FILE__, __LINE__,
528 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
529 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
530 			rc = ENODEV;
531 			goto bce_attach_fail;
532 	}
533 
534 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
535 		BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
536 			__FILE__, __LINE__);
537 		rc = ENODEV;
538 		goto bce_attach_fail;
539 	}
540 
541 	/*
542 	 * The embedded PCIe to PCI-X bridge (EPB)
543 	 * in the 5708 cannot address memory above
544 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
545 	 */
546 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
547 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
548 	else
549 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
550 
551 	/*
552 	 * Find the base address for shared memory access.
553 	 * Newer versions of bootcode use a signature and offset
554 	 * while older versions use a fixed address.
555 	 */
556 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
557 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
558 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
559 	else
560 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
561 
562 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
563 
564 	/* Set initial device and PHY flags */
565 	sc->bce_flags = 0;
566 	sc->bce_phy_flags = 0;
567 
568 	/* Get PCI bus information (speed and type). */
569 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
570 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
571 		u32 clkreg;
572 
573 		sc->bce_flags |= BCE_PCIX_FLAG;
574 
575 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
576 
577 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
578 		switch (clkreg) {
579 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
580 			sc->bus_speed_mhz = 133;
581 			break;
582 
583 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
584 			sc->bus_speed_mhz = 100;
585 			break;
586 
587 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
588 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
589 			sc->bus_speed_mhz = 66;
590 			break;
591 
592 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
593 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
594 			sc->bus_speed_mhz = 50;
595 			break;
596 
597 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
598 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
599 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
600 			sc->bus_speed_mhz = 33;
601 			break;
602 		}
603 	} else {
604 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
605 			sc->bus_speed_mhz = 66;
606 		else
607 			sc->bus_speed_mhz = 33;
608 	}
609 
610 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
611 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
612 
613 	BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
614 		sc->bce_chipid,
615 		((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
616 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
617 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
618 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
619 		sc->bus_speed_mhz);
620 
621 	/* Reset the controller. */
622 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
623 		rc = ENXIO;
624 		goto bce_attach_fail;
625 	}
626 
627 	/* Initialize the controller. */
628 	if (bce_chipinit(sc)) {
629 		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
630 			__FILE__, __LINE__);
631 		rc = ENXIO;
632 		goto bce_attach_fail;
633 	}
634 
635 	/* Perform NVRAM test. */
636 	if (bce_nvram_test(sc)) {
637 		BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
638 			__FILE__, __LINE__);
639 		rc = ENXIO;
640 		goto bce_attach_fail;
641 	}
642 
643 	/* Fetch the permanent Ethernet MAC address. */
644 	bce_get_mac_addr(sc);
645 
646 	/*
647 	 * Trip points control how many BDs
648 	 * should be ready before generating an
649 	 * interrupt while ticks control how long
650 	 * a BD can sit in the chain before
651 	 * generating an interrupt.  Set the default
652 	 * values for the RX and TX rings.
653 	 */
654 
655 #ifdef BCE_DRBUG
656 	/* Force more frequent interrupts. */
657 	sc->bce_tx_quick_cons_trip_int = 1;
658 	sc->bce_tx_quick_cons_trip     = 1;
659 	sc->bce_tx_ticks_int           = 0;
660 	sc->bce_tx_ticks               = 0;
661 
662 	sc->bce_rx_quick_cons_trip_int = 1;
663 	sc->bce_rx_quick_cons_trip     = 1;
664 	sc->bce_rx_ticks_int           = 0;
665 	sc->bce_rx_ticks               = 0;
666 #else
667 	sc->bce_tx_quick_cons_trip_int = 20;
668 	sc->bce_tx_quick_cons_trip     = 20;
669 	sc->bce_tx_ticks_int           = 80;
670 	sc->bce_tx_ticks               = 80;
671 
672 	sc->bce_rx_quick_cons_trip_int = 6;
673 	sc->bce_rx_quick_cons_trip     = 6;
674 	sc->bce_rx_ticks_int           = 18;
675 	sc->bce_rx_ticks               = 18;
676 #endif
677 
678 	/* Update statistics once every second. */
679 	sc->bce_stats_ticks = 1000000 & 0xffff00;
680 
681 	/*
682 	 * The copper based NetXtreme II controllers
683 	 * use an integrated PHY at address 1 while
684 	 * the SerDes controllers use a PHY at
685 	 * address 2.
686 	 */
687 	sc->bce_phy_addr = 1;
688 
689 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
690 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
691 		sc->bce_flags |= BCE_NO_WOL_FLAG;
692 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
693 			sc->bce_phy_addr = 2;
694 			val = REG_RD_IND(sc, sc->bce_shmem_base +
695 					 BCE_SHARED_HW_CFG_CONFIG);
696 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
697 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
698 		}
699 	}
700 
701 	/* Allocate DMA memory resources. */
702 	if (bce_dma_alloc(dev)) {
703 		BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
704 		    __FILE__, __LINE__);
705 		rc = ENXIO;
706 		goto bce_attach_fail;
707 	}
708 
709 	/* Allocate an ifnet structure. */
710 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
711 	if (ifp == NULL) {
712 		BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n",
713 			__FILE__, __LINE__);
714 		rc = ENXIO;
715 		goto bce_attach_fail;
716 	}
717 
718 	/* Initialize the ifnet interface. */
719 	ifp->if_softc        = sc;
720 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
721 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
722 	ifp->if_ioctl        = bce_ioctl;
723 	ifp->if_start        = bce_start;
724 	ifp->if_timer        = 0;
725 	ifp->if_watchdog     = bce_watchdog;
726 	ifp->if_init         = bce_init;
727 	ifp->if_mtu          = ETHERMTU;
728 	ifp->if_hwassist     = BCE_IF_HWASSIST;
729 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
730 	ifp->if_capenable    = ifp->if_capabilities;
731 
732 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
733 	sc->mbuf_alloc_size  = MCLBYTES;
734 #ifdef DEVICE_POLLING
735 	ifp->if_capabilities |= IFCAP_POLLING;
736 #endif
737 
738 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
739 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
740 		ifp->if_baudrate = IF_Gbps(2.5);
741 	else
742 		ifp->if_baudrate = IF_Gbps(1);
743 
744 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
745 	IFQ_SET_READY(&ifp->if_snd);
746 
747 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
748 		BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n",
749 			__FILE__, __LINE__);
750 		rc = ENODEV;
751 		goto bce_attach_fail;
752 	} else {
753 		/* Look for our PHY. */
754 		if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
755 			bce_ifmedia_sts)) {
756 			BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n",
757 				__FILE__, __LINE__);
758 			rc = ENXIO;
759 			goto bce_attach_fail;
760 		}
761 	}
762 
763 	/* Attach to the Ethernet interface list. */
764 	ether_ifattach(ifp, sc->eaddr);
765 
766 #if __FreeBSD_version < 500000
767 	callout_init(&sc->bce_stat_ch);
768 #else
769 	callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
770 #endif
771 
772 	/* Hookup IRQ last. */
773 	rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
774 	   bce_intr, sc, &sc->bce_intrhand);
775 
776 	if (rc) {
777 		BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n",
778 			__FILE__, __LINE__);
779 		bce_detach(dev);
780 		goto bce_attach_exit;
781 	}
782 
783 	/* Print some important debugging info. */
784 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
785 
786 	/* Add the supported sysctls to the kernel. */
787 	bce_add_sysctls(sc);
788 
789 	/* Get the firmware running so IPMI still works */
790 	BCE_LOCK(sc);
791 	bce_mgmt_init_locked(sc);
792 	BCE_UNLOCK(sc);
793 
794 	goto bce_attach_exit;
795 
796 bce_attach_fail:
797 	bce_release_resources(sc);
798 
799 bce_attach_exit:
800 
801 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
802 
803 	return(rc);
804 }
805 
806 
807 /****************************************************************************/
808 /* Device detach function.                                                  */
809 /*                                                                          */
810 /* Stops the controller, resets the controller, and releases resources.     */
811 /*                                                                          */
812 /* Returns:                                                                 */
813 /*   0 on success, positive value on failure.                               */
814 /****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

#ifdef DEVICE_POLLING
	/* Make sure no further polling callbacks reference the device. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Stop and reset the controller. */
	BCE_LOCK(sc);
	bce_stop(sc);
	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	BCE_UNLOCK(sc);

	/* Detach from the network stack before tearing down children. */
	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		ifmedia_removeall(&sc->bce_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bce_miibus);
	}

	/* Release all remaining resources (IRQ, DMA, memory windows, ...). */
	bce_release_resources(sc);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
855 
856 
857 /****************************************************************************/
858 /* Device shutdown function.                                                */
859 /*                                                                          */
860 /* Stops and resets the controller.                                         */
861 /*                                                                          */
862 /* Returns:                                                                 */
863 /*   Nothing                                                                */
864 /****************************************************************************/
865 static void
866 bce_shutdown(device_t dev)
867 {
868 	struct bce_softc *sc = device_get_softc(dev);
869 
870 	BCE_LOCK(sc);
871 	bce_stop(sc);
872 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
873 	BCE_UNLOCK(sc);
874 }
875 
876 
877 /****************************************************************************/
878 /* Indirect register read.                                                  */
879 /*                                                                          */
880 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
881 /* configuration space.  Using this mechanism avoids issues with posted     */
882 /* reads but is much slower than memory-mapped I/O.                         */
883 /*                                                                          */
884 /* Returns:                                                                 */
885 /*   The value of the register.                                             */
886 /****************************************************************************/
887 static u32
888 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
889 {
890 	device_t dev;
891 	dev = sc->bce_dev;
892 
893 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
894 #ifdef BCE_DEBUG
895 	{
896 		u32 val;
897 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
898 		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
899 			__FUNCTION__, offset, val);
900 		return val;
901 	}
902 #else
903 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
904 #endif
905 }
906 
907 
908 /****************************************************************************/
909 /* Indirect register write.                                                 */
910 /*                                                                          */
911 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
912 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
914 /*                                                                          */
915 /* Returns:                                                                 */
916 /*   Nothing.                                                               */
917 /****************************************************************************/
918 static void
919 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
920 {
921 	device_t dev;
922 	dev = sc->bce_dev;
923 
924 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
925 		__FUNCTION__, offset, val);
926 
927 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
928 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
929 }
930 
931 
932 /****************************************************************************/
933 /* Context memory write.                                                    */
934 /*                                                                          */
935 /* The NetXtreme II controller uses context memory to track connection      */
936 /* information for L2 and higher network protocols.                         */
937 /*                                                                          */
938 /* Returns:                                                                 */
939 /*   Nothing.                                                               */
940 /****************************************************************************/
941 static void
942 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
943 {
944 
945 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
946 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
947 
948 	offset += cid_addr;
949 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
950 	REG_WR(sc, BCE_CTX_DATA, val);
951 }
952 
953 
954 /****************************************************************************/
955 /* PHY register read.                                                       */
956 /*                                                                          */
957 /* Implements register reads on the MII bus.                                */
958 /*                                                                          */
959 /* Returns:                                                                 */
960 /*   The value of the register.                                             */
961 /****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/* Pause hardware auto-polling while driving the MDIO bus by hand. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back after the write (presumably to flush it), then settle. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO read command (START_BUSY kicks it off). */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the controller clears START_BUSY or we give up. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* On timeout warn and report 0 as the register value. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		/* Re-read the data register for the value to return. */
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);

}
1030 
1031 
1032 /****************************************************************************/
1033 /* PHY register write.                                                      */
1034 /*                                                                          */
1035 /* Implements register writes on the MII bus.                               */
1036 /*                                                                          */
1037 /* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
1039 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Pause hardware auto-polling while driving the MDIO bus by hand. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back after the write (presumably to flush it), then settle. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO write command (START_BUSY kicks it off). */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the controller clears START_BUSY or we give up. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is reported but is otherwise not treated as an error. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return 0;
}
1099 
1100 
1101 /****************************************************************************/
1102 /* MII bus status change.                                                   */
1103 /*                                                                          */
1104 /* Called by the MII bus driver when the PHY establishes link to set the    */
1105 /* MAC interface registers.                                                 */
1106 /*                                                                          */
1107 /* Returns:                                                                 */
1108 /*   Nothing.                                                               */
1109 /****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Clear the current port mode bits before selecting a new one. */
	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
1140 
1141 
1142 /****************************************************************************/
1143 /* Acquire NVRAM lock.                                                      */
1144 /*                                                                          */
1145 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is reserved    */
/* for use by the driver.                                                   */
1148 /*                                                                          */
1149 /* Returns:                                                                 */
1150 /*   0 on success, positive value on failure.                               */
1151 /****************************************************************************/
1152 static int
1153 bce_acquire_nvram_lock(struct bce_softc *sc)
1154 {
1155 	u32 val;
1156 	int j;
1157 
1158 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1159 
1160 	/* Request access to the flash interface. */
1161 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1162 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1163 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1164 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1165 			break;
1166 
1167 		DELAY(5);
1168 	}
1169 
1170 	if (j >= NVRAM_TIMEOUT_COUNT) {
1171 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1172 		return EBUSY;
1173 	}
1174 
1175 	return 0;
1176 }
1177 
1178 
1179 /****************************************************************************/
1180 /* Release NVRAM lock.                                                      */
1181 /*                                                                          */
1182 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is reserved    */
/* for use by the driver.                                                   */
1185 /*                                                                          */
1186 /* Returns:                                                                 */
1187 /*   0 on success, positive value on failure.                               */
1188 /****************************************************************************/
1189 static int
1190 bce_release_nvram_lock(struct bce_softc *sc)
1191 {
1192 	int j;
1193 	u32 val;
1194 
1195 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1196 
1197 	/*
1198 	 * Relinquish nvram interface.
1199 	 */
1200 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1201 
1202 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1203 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1204 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1205 			break;
1206 
1207 		DELAY(5);
1208 	}
1209 
1210 	if (j >= NVRAM_TIMEOUT_COUNT) {
1211 		DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1212 		return EBUSY;
1213 	}
1214 
1215 	return 0;
1216 }
1217 
1218 
1219 #ifdef BCE_NVRAM_WRITE_SUPPORT
1220 /****************************************************************************/
1221 /* Enable NVRAM write access.                                               */
1222 /*                                                                          */
1223 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1224 /*                                                                          */
1225 /* Returns:                                                                 */
1226 /*   0 on success, positive value on failure.                               */
1227 /****************************************************************************/
static int
bce_enable_nvram_write(struct bce_softc *sc)
{
	u32 val;

	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");

	/* Allow NVRAM writes through the PCI interface. */
	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash parts also need an explicit WREN command. */
	if (!sc->bce_flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);

		/* Wait for the command to complete. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BCE_NVM_COMMAND);
			if (val & BCE_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}
	return 0;
}
1259 
1260 
1261 /****************************************************************************/
1262 /* Disable NVRAM write access.                                              */
1263 /*                                                                          */
1264 /* When the caller is finished writing to NVRAM write access must be        */
1265 /* disabled.                                                                */
1266 /*                                                                          */
1267 /* Returns:                                                                 */
1268 /*   Nothing.                                                               */
1269 /****************************************************************************/
1270 static void
1271 bce_disable_nvram_write(struct bce_softc *sc)
1272 {
1273 	u32 val;
1274 
1275 	DBPRINT(sc, BCE_VERBOSE,  "Disabling NVRAM write.\n");
1276 
1277 	val = REG_RD(sc, BCE_MISC_CFG);
1278 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1279 }
1280 #endif
1281 
1282 
1283 /****************************************************************************/
1284 /* Enable NVRAM access.                                                     */
1285 /*                                                                          */
1286 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1288 /*                                                                          */
1289 /* Returns:                                                                 */
1290 /*   Nothing.                                                               */
1291 /****************************************************************************/
1292 static void
1293 bce_enable_nvram_access(struct bce_softc *sc)
1294 {
1295 	u32 val;
1296 
1297 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1298 
1299 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1300 	/* Enable both bits, even on read. */
1301 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1302 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1303 }
1304 
1305 
1306 /****************************************************************************/
1307 /* Disable NVRAM access.                                                    */
1308 /*                                                                          */
1309 /* When the caller is finished accessing NVRAM access must be disabled.     */
1310 /*                                                                          */
1311 /* Returns:                                                                 */
1312 /*   Nothing.                                                               */
1313 /****************************************************************************/
1314 static void
1315 bce_disable_nvram_access(struct bce_softc *sc)
1316 {
1317 	u32 val;
1318 
1319 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1320 
1321 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1322 
1323 	/* Disable both bits, even after read. */
1324 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1325 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1326 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1327 }
1328 
1329 
1330 #ifdef BCE_NVRAM_WRITE_SUPPORT
1331 /****************************************************************************/
1332 /* Erase NVRAM page before writing.                                         */
1333 /*                                                                          */
1334 /* Non-buffered flash parts require that a page be erased before it is      */
1335 /* written.                                                                 */
1336 /*                                                                          */
1337 /* Returns:                                                                 */
1338 /*   0 on success, positive value on failure.                               */
1339 /****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	      BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
1382 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1383 
1384 
1385 /****************************************************************************/
1386 /* Read a dword (32 bits) from NVRAM.                                       */
1387 /*                                                                          */
1388 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1389 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1390 /*                                                                          */
1391 /* Returns:                                                                 */
1392 /*   0 on success and the 32 bit value read, positive value on failure.     */
1393 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
							u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash (page number shifted
	 * into the page bits plus the offset within the page). */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			   sc->bce_flash_info->page_bits) +
			  (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* Convert the big-endian NVRAM data to host order. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1444 
1445 
1446 #ifdef BCE_NVRAM_WRITE_SUPPORT
1447 /****************************************************************************/
1448 /* Write a dword (32 bits) to NVRAM.                                        */
1449 /*                                                                          */
1450 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1451 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1452 /* enabled NVRAM write access.                                              */
1453 /*                                                                          */
1454 /* Returns:                                                                 */
1455 /*   0 on success, positive value on failure.                               */
1456 /****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
	u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash (page number shifted
	 * into the page bits plus the offset within the page). */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
1500 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1501 
1502 
1503 /****************************************************************************/
1504 /* Initialize NVRAM access.                                                 */
1505 /*                                                                          */
1506 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1507 /* access that device.                                                      */
1508 /*                                                                          */
1509 /* Returns:                                                                 */
1510 /*   0 on success, positive value on failure.                               */
1511 /****************************************************************************/
1512 static int
1513 bce_init_nvram(struct bce_softc *sc)
1514 {
1515 	u32 val;
1516 	int j, entry_count, rc;
1517 	struct flash_spec *flash;
1518 
1519 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1520 
1521 	/* Determine the selected interface. */
1522 	val = REG_RD(sc, BCE_NVM_CFG1);
1523 
1524 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1525 
1526 	rc = 0;
1527 
1528 	/*
1529 	 * Flash reconfiguration is required to support additional
1530 	 * NVRAM devices not directly supported in hardware.
1531 	 * Check if the flash interface was reconfigured
1532 	 * by the bootcode.
1533 	 */
1534 
1535 	if (val & 0x40000000) {
1536 		/* Flash interface reconfigured by bootcode. */
1537 
1538 		DBPRINT(sc,BCE_INFO_LOAD,
1539 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1540 
1541 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1542 		     j++, flash++) {
1543 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1544 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1545 				sc->bce_flash_info = flash;
1546 				break;
1547 			}
1548 		}
1549 	} else {
1550 		/* Flash interface not yet reconfigured. */
1551 		u32 mask;
1552 
1553 		DBPRINT(sc,BCE_INFO_LOAD,
1554 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1555 
1556 		if (val & (1 << 23))
1557 			mask = FLASH_BACKUP_STRAP_MASK;
1558 		else
1559 			mask = FLASH_STRAP_MASK;
1560 
1561 		/* Look for the matching NVRAM device configuration data. */
1562 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1563 
1564 			/* Check if the device matches any of the known devices. */
1565 			if ((val & mask) == (flash->strapping & mask)) {
1566 				/* Found a device match. */
1567 				sc->bce_flash_info = flash;
1568 
1569 				/* Request access to the flash interface. */
1570 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1571 					return rc;
1572 
1573 				/* Reconfigure the flash interface. */
1574 				bce_enable_nvram_access(sc);
1575 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1576 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1577 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1578 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1579 				bce_disable_nvram_access(sc);
1580 				bce_release_nvram_lock(sc);
1581 
1582 				break;
1583 			}
1584 		}
1585 	}
1586 
1587 	/* Check if a matching device was found. */
1588 	if (j == entry_count) {
1589 		sc->bce_flash_info = NULL;
1590 		BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1591 			__FILE__, __LINE__);
1592 		rc = ENODEV;
1593 	}
1594 
1595 	/* Write the flash config data to the shared memory interface. */
1596 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1597 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1598 	if (val)
1599 		sc->bce_flash_size = val;
1600 	else
1601 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1602 
1603 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1604 		sc->bce_flash_info->total_size);
1605 
1606 	DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1607 
1608 	return rc;
1609 }
1610 
1611 
1612 /****************************************************************************/
1613 /* Read an arbitrary range of data from NVRAM.                              */
1614 /*                                                                          */
1615 /* Prepares the NVRAM interface for access and reads the requested data     */
1616 /* into the supplied buffer.                                                */
1617 /*                                                                          */
1618 /* Returns:                                                                 */
1619 /*   0 on success and the data read, positive value on failure.             */
1620 /****************************************************************************/
1621 static int
1622 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1623 	int buf_size)
1624 {
1625 	int rc = 0;
1626 	u32 cmd_flags, offset32, len32, extra;
1627 
1628 	if (buf_size == 0)
1629 		return 0;
1630 
1631 	/* Request access to the flash interface. */
1632 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1633 		return rc;
1634 
1635 	/* Enable access to flash interface */
1636 	bce_enable_nvram_access(sc);
1637 
1638 	len32 = buf_size;
1639 	offset32 = offset;
1640 	extra = 0;
1641 
1642 	cmd_flags = 0;
1643 
1644 	if (offset32 & 3) {
1645 		u8 buf[4];
1646 		u32 pre_len;
1647 
1648 		offset32 &= ~3;
1649 		pre_len = 4 - (offset & 3);
1650 
1651 		if (pre_len >= len32) {
1652 			pre_len = len32;
1653 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1654 		}
1655 		else {
1656 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1657 		}
1658 
1659 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1660 
1661 		if (rc)
1662 			return rc;
1663 
1664 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1665 
1666 		offset32 += 4;
1667 		ret_buf += pre_len;
1668 		len32 -= pre_len;
1669 	}
1670 
1671 	if (len32 & 3) {
1672 		extra = 4 - (len32 & 3);
1673 		len32 = (len32 + 4) & ~3;
1674 	}
1675 
1676 	if (len32 == 4) {
1677 		u8 buf[4];
1678 
1679 		if (cmd_flags)
1680 			cmd_flags = BCE_NVM_COMMAND_LAST;
1681 		else
1682 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1683 				    BCE_NVM_COMMAND_LAST;
1684 
1685 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1686 
1687 		memcpy(ret_buf, buf, 4 - extra);
1688 	}
1689 	else if (len32 > 0) {
1690 		u8 buf[4];
1691 
1692 		/* Read the first word. */
1693 		if (cmd_flags)
1694 			cmd_flags = 0;
1695 		else
1696 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1697 
1698 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1699 
1700 		/* Advance to the next dword. */
1701 		offset32 += 4;
1702 		ret_buf += 4;
1703 		len32 -= 4;
1704 
1705 		while (len32 > 4 && rc == 0) {
1706 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1707 
1708 			/* Advance to the next dword. */
1709 			offset32 += 4;
1710 			ret_buf += 4;
1711 			len32 -= 4;
1712 		}
1713 
1714 		if (rc)
1715 			return rc;
1716 
1717 		cmd_flags = BCE_NVM_COMMAND_LAST;
1718 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1719 
1720 		memcpy(ret_buf, buf, 4 - extra);
1721 	}
1722 
1723 	/* Disable access to flash interface and release the lock. */
1724 	bce_disable_nvram_access(sc);
1725 	bce_release_nvram_lock(sc);
1726 
1727 	return rc;
1728 }
1729 
1730 
1731 #ifdef BCE_NVRAM_WRITE_SUPPORT
1732 /****************************************************************************/
1733 /* Write an arbitrary range of data from NVRAM.                             */
1734 /*                                                                          */
1735 /* Prepares the NVRAM interface for write access and writes the requested   */
1736 /* data from the supplied buffer.  The caller is responsible for            */
1737 /* calculating any appropriate CRCs.                                        */
1738 /*                                                                          */
1739 /* Returns:                                                                 */
1740 /*   0 on success, positive value on failure.                               */
1741 /****************************************************************************/
1742 static int
1743 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1744 	int buf_size)
1745 {
1746 	u32 written, offset32, len32;
1747 	u8 *buf, start[4], end[4];
1748 	int rc = 0;
1749 	int align_start, align_end;
1750 
1751 	buf = data_buf;
1752 	offset32 = offset;
1753 	len32 = buf_size;
1754 	align_start = align_end = 0;
1755 
1756 	if ((align_start = (offset32 & 3))) {
1757 		offset32 &= ~3;
1758 		len32 += align_start;
1759 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1760 			return rc;
1761 	}
1762 
1763 	if (len32 & 3) {
1764 	       	if ((len32 > 4) || !align_start) {
1765 			align_end = 4 - (len32 & 3);
1766 			len32 += align_end;
1767 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1768 				end, 4))) {
1769 				return rc;
1770 			}
1771 		}
1772 	}
1773 
1774 	if (align_start || align_end) {
1775 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1776 		if (buf == 0)
1777 			return ENOMEM;
1778 		if (align_start) {
1779 			memcpy(buf, start, 4);
1780 		}
1781 		if (align_end) {
1782 			memcpy(buf + len32 - 4, end, 4);
1783 		}
1784 		memcpy(buf + align_start, data_buf, buf_size);
1785 	}
1786 
1787 	written = 0;
1788 	while ((written < len32) && (rc == 0)) {
1789 		u32 page_start, page_end, data_start, data_end;
1790 		u32 addr, cmd_flags;
1791 		int i;
1792 		u8 flash_buffer[264];
1793 
1794 	    /* Find the page_start addr */
1795 		page_start = offset32 + written;
1796 		page_start -= (page_start % sc->bce_flash_info->page_size);
1797 		/* Find the page_end addr */
1798 		page_end = page_start + sc->bce_flash_info->page_size;
1799 		/* Find the data_start addr */
1800 		data_start = (written == 0) ? offset32 : page_start;
1801 		/* Find the data_end addr */
1802 		data_end = (page_end > offset32 + len32) ?
1803 			(offset32 + len32) : page_end;
1804 
1805 		/* Request access to the flash interface. */
1806 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1807 			goto nvram_write_end;
1808 
1809 		/* Enable access to flash interface */
1810 		bce_enable_nvram_access(sc);
1811 
1812 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1813 		if (sc->bce_flash_info->buffered == 0) {
1814 			int j;
1815 
1816 			/* Read the whole page into the buffer
1817 			 * (non-buffer flash only) */
1818 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1819 				if (j == (sc->bce_flash_info->page_size - 4)) {
1820 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1821 				}
1822 				rc = bce_nvram_read_dword(sc,
1823 					page_start + j,
1824 					&flash_buffer[j],
1825 					cmd_flags);
1826 
1827 				if (rc)
1828 					goto nvram_write_end;
1829 
1830 				cmd_flags = 0;
1831 			}
1832 		}
1833 
1834 		/* Enable writes to flash interface (unlock write-protect) */
1835 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1836 			goto nvram_write_end;
1837 
1838 		/* Erase the page */
1839 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1840 			goto nvram_write_end;
1841 
1842 		/* Re-enable the write again for the actual write */
1843 		bce_enable_nvram_write(sc);
1844 
1845 		/* Loop to write back the buffer data from page_start to
1846 		 * data_start */
1847 		i = 0;
1848 		if (sc->bce_flash_info->buffered == 0) {
1849 			for (addr = page_start; addr < data_start;
1850 				addr += 4, i += 4) {
1851 
1852 				rc = bce_nvram_write_dword(sc, addr,
1853 					&flash_buffer[i], cmd_flags);
1854 
1855 				if (rc != 0)
1856 					goto nvram_write_end;
1857 
1858 				cmd_flags = 0;
1859 			}
1860 		}
1861 
1862 		/* Loop to write the new data from data_start to data_end */
1863 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1864 			if ((addr == page_end - 4) ||
1865 				((sc->bce_flash_info->buffered) &&
1866 				 (addr == data_end - 4))) {
1867 
1868 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1869 			}
1870 			rc = bce_nvram_write_dword(sc, addr, buf,
1871 				cmd_flags);
1872 
1873 			if (rc != 0)
1874 				goto nvram_write_end;
1875 
1876 			cmd_flags = 0;
1877 			buf += 4;
1878 		}
1879 
1880 		/* Loop to write back the buffer data from data_end
1881 		 * to page_end */
1882 		if (sc->bce_flash_info->buffered == 0) {
1883 			for (addr = data_end; addr < page_end;
1884 				addr += 4, i += 4) {
1885 
1886 				if (addr == page_end-4) {
1887 					cmd_flags = BCE_NVM_COMMAND_LAST;
1888                 		}
1889 				rc = bce_nvram_write_dword(sc, addr,
1890 					&flash_buffer[i], cmd_flags);
1891 
1892 				if (rc != 0)
1893 					goto nvram_write_end;
1894 
1895 				cmd_flags = 0;
1896 			}
1897 		}
1898 
1899 		/* Disable writes to flash interface (lock write-protect) */
1900 		bce_disable_nvram_write(sc);
1901 
1902 		/* Disable access to flash interface */
1903 		bce_disable_nvram_access(sc);
1904 		bce_release_nvram_lock(sc);
1905 
1906 		/* Increment written */
1907 		written += data_end - data_start;
1908 	}
1909 
1910 nvram_write_end:
1911 	if (align_start || align_end)
1912 		free(buf, M_DEVBUF);
1913 
1914 	return rc;
1915 }
1916 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1917 
1918 
1919 /****************************************************************************/
1920 /* Verifies that NVRAM is accessible and contains valid data.               */
1921 /*                                                                          */
1922 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1923 /* correct.                                                                 */
1924 /*                                                                          */
1925 /* Returns:                                                                 */
1926 /*   0 on success, positive value on failure.                               */
1927 /****************************************************************************/
1928 static int
1929 bce_nvram_test(struct bce_softc *sc)
1930 {
1931 	u32 buf[BCE_NVRAM_SIZE / 4];
1932 	u8 *data = (u8 *) buf;
1933 	int rc = 0;
1934 	u32 magic, csum;
1935 
1936 
1937 	/*
1938 	 * Check that the device NVRAM is valid by reading
1939 	 * the magic value at offset 0.
1940 	 */
1941 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1942 		goto bce_nvram_test_done;
1943 
1944 
1945     magic = bce_be32toh(buf[0]);
1946 	if (magic != BCE_NVRAM_MAGIC) {
1947 		rc = ENODEV;
1948 		BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1949 			"Found: 0x%08X\n",
1950 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1951 		goto bce_nvram_test_done;
1952 	}
1953 
1954 	/*
1955 	 * Verify that the device NVRAM includes valid
1956 	 * configuration data.
1957 	 */
1958 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1959 		goto bce_nvram_test_done;
1960 
1961 	csum = ether_crc32_le(data, 0x100);
1962 	if (csum != BCE_CRC32_RESIDUAL) {
1963 		rc = ENODEV;
1964 		BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1965 			"Expected: 0x%08X, Found: 0x%08X\n",
1966 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1967 		goto bce_nvram_test_done;
1968 	}
1969 
1970 	csum = ether_crc32_le(data + 0x100, 0x100);
1971 	if (csum != BCE_CRC32_RESIDUAL) {
1972 		BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1973 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1974 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1975 		rc = ENODEV;
1976 	}
1977 
1978 bce_nvram_test_done:
1979 	return rc;
1980 }
1981 
1982 
1983 /****************************************************************************/
1984 /* Free any DMA memory owned by the driver.                                 */
1985 /*                                                                          */
1986 /* Scans through each data structre that requires DMA memory and frees      */
1987 /* the memory if allocated.                                                 */
1988 /*                                                                          */
1989 /* Returns:                                                                 */
1990 /*   Nothing.                                                               */
1991 /****************************************************************************/
1992 static void
1993 bce_dma_free(struct bce_softc *sc)
1994 {
1995 	int i;
1996 
1997 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1998 
1999 	/* Destroy the status block. */
2000 	if (sc->status_block != NULL)
2001 		bus_dmamem_free(
2002 			sc->status_tag,
2003 		    sc->status_block,
2004 		    sc->status_map);
2005 
2006 	if (sc->status_map != NULL) {
2007 		bus_dmamap_unload(
2008 			sc->status_tag,
2009 		    sc->status_map);
2010 		bus_dmamap_destroy(sc->status_tag,
2011 		    sc->status_map);
2012 	}
2013 
2014 	if (sc->status_tag != NULL)
2015 		bus_dma_tag_destroy(sc->status_tag);
2016 
2017 
2018 	/* Destroy the statistics block. */
2019 	if (sc->stats_block != NULL)
2020 		bus_dmamem_free(
2021 			sc->stats_tag,
2022 		    sc->stats_block,
2023 		    sc->stats_map);
2024 
2025 	if (sc->stats_map != NULL) {
2026 		bus_dmamap_unload(
2027 			sc->stats_tag,
2028 		    sc->stats_map);
2029 		bus_dmamap_destroy(sc->stats_tag,
2030 		    sc->stats_map);
2031 	}
2032 
2033 	if (sc->stats_tag != NULL)
2034 		bus_dma_tag_destroy(sc->stats_tag);
2035 
2036 
2037 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2038 	for (i = 0; i < TX_PAGES; i++ ) {
2039 		if (sc->tx_bd_chain[i] != NULL)
2040 			bus_dmamem_free(
2041 				sc->tx_bd_chain_tag,
2042 			    sc->tx_bd_chain[i],
2043 			    sc->tx_bd_chain_map[i]);
2044 
2045 		if (sc->tx_bd_chain_map[i] != NULL) {
2046 			bus_dmamap_unload(
2047 				sc->tx_bd_chain_tag,
2048 		    	sc->tx_bd_chain_map[i]);
2049 			bus_dmamap_destroy(
2050 				sc->tx_bd_chain_tag,
2051 			    sc->tx_bd_chain_map[i]);
2052 		}
2053 
2054 	}
2055 
2056 	/* Destroy the TX buffer descriptor tag. */
2057 	if (sc->tx_bd_chain_tag != NULL)
2058 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2059 
2060 
2061 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2062 	for (i = 0; i < RX_PAGES; i++ ) {
2063 		if (sc->rx_bd_chain[i] != NULL)
2064 			bus_dmamem_free(
2065 				sc->rx_bd_chain_tag,
2066 			    sc->rx_bd_chain[i],
2067 			    sc->rx_bd_chain_map[i]);
2068 
2069 		if (sc->rx_bd_chain_map[i] != NULL) {
2070 			bus_dmamap_unload(
2071 				sc->rx_bd_chain_tag,
2072 		    	sc->rx_bd_chain_map[i]);
2073 			bus_dmamap_destroy(
2074 				sc->rx_bd_chain_tag,
2075 			    sc->rx_bd_chain_map[i]);
2076 		}
2077 	}
2078 
2079 	/* Destroy the RX buffer descriptor tag. */
2080 	if (sc->rx_bd_chain_tag != NULL)
2081 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2082 
2083 
2084 	/* Unload and destroy the TX mbuf maps. */
2085 	for (i = 0; i < TOTAL_TX_BD; i++) {
2086 		if (sc->tx_mbuf_map[i] != NULL) {
2087 			bus_dmamap_unload(sc->tx_mbuf_tag,
2088 				sc->tx_mbuf_map[i]);
2089 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2090 	 			sc->tx_mbuf_map[i]);
2091 		}
2092 	}
2093 
2094 	/* Destroy the TX mbuf tag. */
2095 	if (sc->tx_mbuf_tag != NULL)
2096 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2097 
2098 
2099 	/* Unload and destroy the RX mbuf maps. */
2100 	for (i = 0; i < TOTAL_RX_BD; i++) {
2101 		if (sc->rx_mbuf_map[i] != NULL) {
2102 			bus_dmamap_unload(sc->rx_mbuf_tag,
2103 				sc->rx_mbuf_map[i]);
2104 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2105 	 			sc->rx_mbuf_map[i]);
2106 		}
2107 	}
2108 
2109 	/* Destroy the RX mbuf tag. */
2110 	if (sc->rx_mbuf_tag != NULL)
2111 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2112 
2113 
2114 	/* Destroy the parent tag */
2115 	if (sc->parent_tag != NULL)
2116 		bus_dma_tag_destroy(sc->parent_tag);
2117 
2118 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2119 
2120 }
2121 
2122 
2123 /****************************************************************************/
2124 /* Get DMA memory from the OS.                                              */
2125 /*                                                                          */
2126 /* Validates that the OS has provided DMA buffers in response to a          */
2127 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2128 /* When the callback is used the OS will return 0 for the mapping function  */
2129 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2130 /* failures back to the caller.                                             */
2131 /*                                                                          */
2132 /* Returns:                                                                 */
2133 /*   Nothing.                                                               */
2134 /****************************************************************************/
2135 static void
2136 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2137 {
2138 	bus_addr_t *busaddr = arg;
2139 
2140 	/* Simulate a mapping failure. */
2141 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2142 		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2143 			__FILE__, __LINE__);
2144 		error = ENOMEM);
2145 
2146 	/* Check for an error and signal the caller that an error occurred. */
2147 	if (error) {
2148 		printf("bce %s(%d): DMA mapping error! error = %d, "
2149 		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2150 		*busaddr = 0;
2151 		return;
2152 	}
2153 
2154 	*busaddr = segs->ds_addr;
2155 	return;
2156 }
2157 
2158 
2159 /****************************************************************************/
2160 /* Allocate any DMA memory needed by the driver.                            */
2161 /*                                                                          */
2162 /* Allocates DMA memory needed for the various global structures needed by  */
2163 /* hardware.                                                                */
2164 /*                                                                          */
2165 /* Returns:                                                                 */
2166 /*   0 for success, positive value for failure.                             */
2167 /****************************************************************************/
2168 static int
2169 bce_dma_alloc(device_t dev)
2170 {
2171 	struct bce_softc *sc;
2172 	int i, error, rc = 0;
2173 	bus_addr_t busaddr;
2174 
2175 	sc = device_get_softc(dev);
2176 
2177 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2178 
2179 	/*
2180 	 * Allocate the parent bus DMA tag appropriate for PCI.
2181 	 */
2182 	if (bus_dma_tag_create(NULL,		/* parent     */
2183 			1,			/* alignment  */
2184 			BCE_DMA_BOUNDARY,	/* boundary   */
2185 			sc->max_bus_addr,	/* lowaddr    */
2186 			BUS_SPACE_MAXADDR,	/* highaddr   */
2187 			NULL, 			/* filterfunc */
2188 			NULL,			/* filterarg  */
2189 			MAXBSIZE, 		/* maxsize    */
2190 			BUS_SPACE_UNRESTRICTED,	/* nsegments  */
2191 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
2192 			0,			/* flags      */
2193 			NULL, 			/* locfunc    */
2194 			NULL,			/* lockarg    */
2195 			&sc->parent_tag)) {
2196 		BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2197 			__FILE__, __LINE__);
2198 		rc = ENOMEM;
2199 		goto bce_dma_alloc_exit;
2200 	}
2201 
2202 	/*
2203 	 * Create a DMA tag for the status block, allocate and clear the
2204 	 * memory, map the memory into DMA space, and fetch the physical
2205 	 * address of the block.
2206 	 */
2207 	if (bus_dma_tag_create(
2208 		sc->parent_tag,			/* parent      */
2209 	    	BCE_DMA_ALIGN,			/* alignment   */
2210 	    	BCE_DMA_BOUNDARY,		/* boundary    */
2211 	    	sc->max_bus_addr,		/* lowaddr     */
2212 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2213 	    	NULL, 				/* filterfunc  */
2214 	    	NULL, 				/* filterarg   */
2215 	    	BCE_STATUS_BLK_SZ, 		/* maxsize     */
2216 	    	1,				/* nsegments   */
2217 	    	BCE_STATUS_BLK_SZ, 		/* maxsegsize  */
2218 	    	0,				/* flags       */
2219 	    	NULL, 				/* lockfunc    */
2220 	    	NULL,				/* lockarg     */
2221 	    	&sc->status_tag)) {
2222 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2223 			__FILE__, __LINE__);
2224 		rc = ENOMEM;
2225 		goto bce_dma_alloc_exit;
2226 	}
2227 
2228 	if(bus_dmamem_alloc(
2229 		sc->status_tag,			/* dmat        */
2230 	    	(void **)&sc->status_block,	/* vaddr       */
2231 	    	BUS_DMA_NOWAIT,			/* flags       */
2232 	    	&sc->status_map)) {
2233 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2234 			__FILE__, __LINE__);
2235 		rc = ENOMEM;
2236 		goto bce_dma_alloc_exit;
2237 	}
2238 
2239 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2240 
2241 	error = bus_dmamap_load(
2242 		sc->status_tag,	   		/* dmat        */
2243 	    	sc->status_map,	   		/* map         */
2244 	    	sc->status_block,	 	/* buf         */
2245 	    	BCE_STATUS_BLK_SZ,	 	/* buflen      */
2246 	    	bce_dma_map_addr, 	 	/* callback    */
2247 	    	&busaddr,		 	/* callbackarg */
2248 	    	BUS_DMA_NOWAIT);		/* flags       */
2249 
2250 	if (error) {
2251 		BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2252 			__FILE__, __LINE__);
2253 		rc = ENOMEM;
2254 		goto bce_dma_alloc_exit;
2255 	}
2256 
2257 	sc->status_block_paddr = busaddr;
2258 	/* DRC - Fix for 64 bit addresses. */
2259 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2260 		(u32) sc->status_block_paddr);
2261 
2262 	/*
2263 	 * Create a DMA tag for the statistics block, allocate and clear the
2264 	 * memory, map the memory into DMA space, and fetch the physical
2265 	 * address of the block.
2266 	 */
2267 	if (bus_dma_tag_create(
2268 		sc->parent_tag,			/* parent      */
2269 	    	BCE_DMA_ALIGN,	 		/* alignment   */
2270 	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2271 	    	sc->max_bus_addr,		/* lowaddr     */
2272 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2273 	    	NULL,		   		/* filterfunc  */
2274 	    	NULL, 		  		/* filterarg   */
2275 	    	BCE_STATS_BLK_SZ, 		/* maxsize     */
2276 	    	1,		  		/* nsegments   */
2277 	    	BCE_STATS_BLK_SZ, 		/* maxsegsize  */
2278 	    	0, 		  		/* flags       */
2279 	    	NULL, 		 		/* lockfunc    */
2280 	    	NULL, 		  		/* lockarg     */
2281 	    	&sc->stats_tag)) {
2282 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2283 			__FILE__, __LINE__);
2284 		rc = ENOMEM;
2285 		goto bce_dma_alloc_exit;
2286 	}
2287 
2288 	if (bus_dmamem_alloc(
2289 		sc->stats_tag,			/* dmat        */
2290 	    	(void **)&sc->stats_block,	/* vaddr       */
2291 	    	BUS_DMA_NOWAIT,			/* flags       */
2292 	    	&sc->stats_map)) {
2293 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2294 			__FILE__, __LINE__);
2295 		rc = ENOMEM;
2296 		goto bce_dma_alloc_exit;
2297 	}
2298 
2299 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2300 
2301 	error = bus_dmamap_load(
2302 		sc->stats_tag,	 	/* dmat        */
2303 	    	sc->stats_map,	 	/* map         */
2304 	    	sc->stats_block, 	/* buf         */
2305 	    	BCE_STATS_BLK_SZ,	/* buflen      */
2306 	    	bce_dma_map_addr,	/* callback    */
2307 	    	&busaddr, 	 	/* callbackarg */
2308 	    	BUS_DMA_NOWAIT);	/* flags       */
2309 
2310 	if(error) {
2311 		BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2312 			__FILE__, __LINE__);
2313 		rc = ENOMEM;
2314 		goto bce_dma_alloc_exit;
2315 	}
2316 
2317 	sc->stats_block_paddr = busaddr;
2318 	/* DRC - Fix for 64 bit address. */
2319 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2320 		(u32) sc->stats_block_paddr);
2321 
2322 	/*
2323 	 * Create a DMA tag for the TX buffer descriptor chain,
2324 	 * allocate and clear the  memory, and fetch the
2325 	 * physical address of the block.
2326 	 */
2327 	if(bus_dma_tag_create(
2328 			sc->parent_tag,		/* parent      */
2329 			BCM_PAGE_SIZE,		/* alignment   */
2330 		    	BCE_DMA_BOUNDARY,	/* boundary    */
2331 			sc->max_bus_addr,	/* lowaddr     */
2332 			BUS_SPACE_MAXADDR, 	/* highaddr    */
2333 			NULL,			/* filterfunc  */
2334 			NULL,			/* filterarg   */
2335 			BCE_TX_CHAIN_PAGE_SZ,	/* maxsize     */
2336 			1,			/* nsegments   */
2337 			BCE_TX_CHAIN_PAGE_SZ,	/* maxsegsize  */
2338 			0,			/* flags       */
2339 			NULL,			/* lockfunc    */
2340 			NULL,			/* lockarg     */
2341 			&sc->tx_bd_chain_tag)) {
2342 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2343 			__FILE__, __LINE__);
2344 		rc = ENOMEM;
2345 		goto bce_dma_alloc_exit;
2346 	}
2347 
2348 	for (i = 0; i < TX_PAGES; i++) {
2349 
2350 		if(bus_dmamem_alloc(
2351 			sc->tx_bd_chain_tag,		/* tag   */
2352 	    		(void **)&sc->tx_bd_chain[i],	/* vaddr */
2353 	    		BUS_DMA_NOWAIT,			/* flags */
2354 		    	&sc->tx_bd_chain_map[i])) {
2355 			BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2356 				"chain DMA memory!\n", __FILE__, __LINE__);
2357 			rc = ENOMEM;
2358 			goto bce_dma_alloc_exit;
2359 		}
2360 
2361 		error = bus_dmamap_load(
2362 			sc->tx_bd_chain_tag,		/* dmat        */
2363 	    		sc->tx_bd_chain_map[i],		/* map         */
2364 	    		sc->tx_bd_chain[i],		/* buf         */
2365 		    	BCE_TX_CHAIN_PAGE_SZ,		/* buflen      */
2366 		    	bce_dma_map_addr,		/* callback    */
2367 	    		&busaddr,			/* callbackarg */
2368 	    		BUS_DMA_NOWAIT);		/* flags       */
2369 
2370 		if (error) {
2371 			BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2372 				__FILE__, __LINE__);
2373 			rc = ENOMEM;
2374 			goto bce_dma_alloc_exit;
2375 		}
2376 
2377 		sc->tx_bd_chain_paddr[i] = busaddr;
2378 		/* DRC - Fix for 64 bit systems. */
2379 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2380 			i, (u32) sc->tx_bd_chain_paddr[i]);
2381 	}
2382 
2383 	/* Create a DMA tag for TX mbufs. */
2384 	if (bus_dma_tag_create(
2385 			sc->parent_tag,	 	 	/* parent      */
2386 			1,		 		/* alignment   */
2387 			BCE_DMA_BOUNDARY, 		/* boundary    */
2388 			sc->max_bus_addr,		/* lowaddr     */
2389 			BUS_SPACE_MAXADDR,		/* highaddr    */
2390 			NULL,				/* filterfunc  */
2391 			NULL,				/* filterarg   */
2392 			MCLBYTES * BCE_MAX_SEGMENTS,	/* maxsize     */
2393 			BCE_MAX_SEGMENTS,  		/* nsegments   */
2394 			MCLBYTES,			/* maxsegsize  */
2395 			0,				/* flags       */
2396 			NULL,				/* lockfunc    */
2397 			NULL,				/* lockarg     */
2398 			&sc->tx_mbuf_tag)) {
2399 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2400 			__FILE__, __LINE__);
2401 		rc = ENOMEM;
2402 		goto bce_dma_alloc_exit;
2403 	}
2404 
2405 	/* Create DMA maps for the TX mbufs clusters. */
2406 	for (i = 0; i < TOTAL_TX_BD; i++) {
2407 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2408 			&sc->tx_mbuf_map[i])) {
2409 			BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2410 				__FILE__, __LINE__);
2411 			rc = ENOMEM;
2412 			goto bce_dma_alloc_exit;
2413 		}
2414 	}
2415 
2416 	/*
2417 	 * Create a DMA tag for the RX buffer descriptor chain,
2418 	 * allocate and clear the  memory, and fetch the physical
2419 	 * address of the blocks.
2420 	 */
2421 	if (bus_dma_tag_create(
2422 			sc->parent_tag,			/* parent      */
2423 			BCM_PAGE_SIZE,			/* alignment   */
2424 			BCE_DMA_BOUNDARY,		/* boundary    */
2425 			BUS_SPACE_MAXADDR,		/* lowaddr     */
2426 			sc->max_bus_addr,		/* lowaddr     */
2427 			NULL,				/* filter      */
2428 			NULL, 				/* filterarg   */
2429 			BCE_RX_CHAIN_PAGE_SZ,		/* maxsize     */
2430 			1, 				/* nsegments   */
2431 			BCE_RX_CHAIN_PAGE_SZ,		/* maxsegsize  */
2432 			0,		 		/* flags       */
2433 			NULL,				/* lockfunc    */
2434 			NULL,				/* lockarg     */
2435 			&sc->rx_bd_chain_tag)) {
2436 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2437 			__FILE__, __LINE__);
2438 		rc = ENOMEM;
2439 		goto bce_dma_alloc_exit;
2440 	}
2441 
2442 	for (i = 0; i < RX_PAGES; i++) {
2443 
2444 		if (bus_dmamem_alloc(
2445 			sc->rx_bd_chain_tag,		/* tag   */
2446 	    		(void **)&sc->rx_bd_chain[i], 	/* vaddr */
2447 	    		BUS_DMA_NOWAIT,		  	/* flags */
2448 		    	&sc->rx_bd_chain_map[i])) {
2449 			BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2450 				"DMA memory!\n", __FILE__, __LINE__);
2451 			rc = ENOMEM;
2452 			goto bce_dma_alloc_exit;
2453 		}
2454 
2455 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2456 
2457 		error = bus_dmamap_load(
2458 			sc->rx_bd_chain_tag,	/* dmat        */
2459 	    		sc->rx_bd_chain_map[i],	/* map         */
2460 	    		sc->rx_bd_chain[i],	/* buf         */
2461 		    	BCE_RX_CHAIN_PAGE_SZ,  	/* buflen      */
2462 		    	bce_dma_map_addr,   	/* callback    */
2463 	    		&busaddr,	   	/* callbackarg */
2464 	    		BUS_DMA_NOWAIT);	/* flags       */
2465 
2466 		if (error) {
2467 			BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2468 				__FILE__, __LINE__);
2469 			rc = ENOMEM;
2470 			goto bce_dma_alloc_exit;
2471 		}
2472 
2473 		sc->rx_bd_chain_paddr[i] = busaddr;
2474 		/* DRC - Fix for 64 bit systems. */
2475 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2476 			i, (u32) sc->rx_bd_chain_paddr[i]);
2477 	}
2478 
2479 	/*
2480 	 * Create a DMA tag for RX mbufs.
2481 	 */
2482 	if (bus_dma_tag_create(
2483 			sc->parent_tag,		/* parent      */
2484 			1,			/* alignment   */
2485 			BCE_DMA_BOUNDARY,  	/* boundary    */
2486 			sc->max_bus_addr,  	/* lowaddr     */
2487 			BUS_SPACE_MAXADDR,	/* highaddr    */
2488 			NULL, 			/* filterfunc  */
2489 			NULL, 			/* filterarg   */
2490 			MJUM9BYTES,		/* maxsize     */
2491 			BCE_MAX_SEGMENTS, 	/* nsegments   */
2492 			MJUM9BYTES,		/* maxsegsize  */
2493 			0,			/* flags       */
2494 			NULL, 			/* lockfunc    */
2495 			NULL,			/* lockarg     */
2496 	    	&sc->rx_mbuf_tag)) {
2497 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2498 			__FILE__, __LINE__);
2499 		rc = ENOMEM;
2500 		goto bce_dma_alloc_exit;
2501 	}
2502 
2503 	/* Create DMA maps for the RX mbuf clusters. */
2504 	for (i = 0; i < TOTAL_RX_BD; i++) {
2505 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2506 				&sc->rx_mbuf_map[i])) {
2507 			BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2508 				__FILE__, __LINE__);
2509 			rc = ENOMEM;
2510 			goto bce_dma_alloc_exit;
2511 		}
2512 	}
2513 
2514 bce_dma_alloc_exit:
2515 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2516 
2517 	return(rc);
2518 }
2519 
2520 
2521 /****************************************************************************/
2522 /* Release all resources used by the driver.                                */
2523 /*                                                                          */
2524 /* Releases all resources acquired by the driver including interrupts,      */
2525 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2526 /*                                                                          */
2527 /* Returns:                                                                 */
2528 /*   Nothing.                                                               */
2529 /****************************************************************************/
2530 static void
2531 bce_release_resources(struct bce_softc *sc)
2532 {
2533 	device_t dev;
2534 
2535 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2536 
2537 	dev = sc->bce_dev;
2538 
2539 	bce_dma_free(sc);
2540 
2541 	if (sc->bce_intrhand != NULL)
2542 		bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2543 
2544 	if (sc->bce_irq != NULL)
2545 		bus_release_resource(dev,
2546 			SYS_RES_IRQ,
2547 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2548 			sc->bce_irq);
2549 
2550 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
2551 		pci_release_msi(dev);
2552 
2553 	if (sc->bce_res != NULL)
2554 		bus_release_resource(dev,
2555 			SYS_RES_MEMORY,
2556 		    PCIR_BAR(0),
2557 		    sc->bce_res);
2558 
2559 	if (sc->bce_ifp != NULL)
2560 		if_free(sc->bce_ifp);
2561 
2562 
2563 	if (mtx_initialized(&sc->bce_mtx))
2564 		BCE_LOCK_DESTROY(sc);
2565 
2566 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2567 
2568 }
2569 
2570 
2571 /****************************************************************************/
2572 /* Firmware synchronization.                                                */
2573 /*                                                                          */
2574 /* Before performing certain events such as a chip reset, synchronize with  */
2575 /* the firmware first.                                                      */
2576 /*                                                                          */
2577 /* Returns:                                                                 */
2578 /*   0 for success, positive value for failure.                             */
2579 /****************************************************************************/
static int
bce_fw_sync(struct bce_softc *sc, u32 msg_data)
{
	int i, rc = 0;
	u32 val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bce_fw_timed_out) {
		rc = EBUSY;
		goto bce_fw_sync_exit;
	}

	/* Increment the message sequence number. */
	/* The sequence occupies the BCE_DRV_MSG_SEQ field of the mailbox word. */
	sc->bce_fw_wr_seq++;
	msg_data |= sc->bce_fw_wr_seq;

 	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	/* Polls once per millisecond, up to FW_ACK_TIME_OUT_MS times. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	/* WAIT0 messages are exempt: a missing ack there is not escalated. */
	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {

		BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
			"msg_data = 0x%08X\n",
			__FILE__, __LINE__, msg_data);

		/* Replace the message code with a timeout notification. */
		msg_data &= ~BCE_DRV_MSG_CODE;
		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);

		/* Remember the timeout so later calls can fail fast. */
		sc->bce_fw_timed_out = 1;
		rc = EBUSY;
	}

bce_fw_sync_exit:
	return (rc);
}
2630 
2631 
2632 /****************************************************************************/
2633 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2634 /*                                                                          */
2635 /* Returns:                                                                 */
2636 /*   Nothing.                                                               */
2637 /****************************************************************************/
2638 static void
2639 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2640 	u32 rv2p_code_len, u32 rv2p_proc)
2641 {
2642 	int i;
2643 	u32 val;
2644 
2645 	for (i = 0; i < rv2p_code_len; i += 8) {
2646 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2647 		rv2p_code++;
2648 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2649 		rv2p_code++;
2650 
2651 		if (rv2p_proc == RV2P_PROC1) {
2652 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2653 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2654 		}
2655 		else {
2656 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2657 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2658 		}
2659 	}
2660 
2661 	/* Reset the processor, un-stall is done later. */
2662 	if (rv2p_proc == RV2P_PROC1) {
2663 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2664 	}
2665 	else {
2666 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2667 	}
2668 }
2669 
2670 
2671 /****************************************************************************/
2672 /* Load RISC processor firmware.                                            */
2673 /*                                                                          */
2674 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2675 /* associated with a particular processor.                                  */
2676 /*                                                                          */
2677 /* Returns:                                                                 */
2678 /*   Nothing.                                                               */
2679 /****************************************************************************/
2680 static void
2681 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2682 	struct fw_info *fw)
2683 {
2684 	u32 offset;
2685 	u32 val;
2686 
2687 	/* Halt the CPU. */
2688 	val = REG_RD_IND(sc, cpu_reg->mode);
2689 	val |= cpu_reg->mode_value_halt;
2690 	REG_WR_IND(sc, cpu_reg->mode, val);
2691 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2692 
2693 	/* Load the Text area. */
2694 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2695 	if (fw->text) {
2696 		int j;
2697 
2698 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2699 			REG_WR_IND(sc, offset, fw->text[j]);
2700 	        }
2701 	}
2702 
2703 	/* Load the Data area. */
2704 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2705 	if (fw->data) {
2706 		int j;
2707 
2708 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2709 			REG_WR_IND(sc, offset, fw->data[j]);
2710 		}
2711 	}
2712 
2713 	/* Load the SBSS area. */
2714 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2715 	if (fw->sbss) {
2716 		int j;
2717 
2718 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2719 			REG_WR_IND(sc, offset, fw->sbss[j]);
2720 		}
2721 	}
2722 
2723 	/* Load the BSS area. */
2724 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2725 	if (fw->bss) {
2726 		int j;
2727 
2728 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2729 			REG_WR_IND(sc, offset, fw->bss[j]);
2730 		}
2731 	}
2732 
2733 	/* Load the Read-Only area. */
2734 	offset = cpu_reg->spad_base +
2735 		(fw->rodata_addr - cpu_reg->mips_view_base);
2736 	if (fw->rodata) {
2737 		int j;
2738 
2739 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2740 			REG_WR_IND(sc, offset, fw->rodata[j]);
2741 		}
2742 	}
2743 
2744 	/* Clear the pre-fetch instruction. */
2745 	REG_WR_IND(sc, cpu_reg->inst, 0);
2746 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2747 
2748 	/* Start the CPU. */
2749 	val = REG_RD_IND(sc, cpu_reg->mode);
2750 	val &= ~cpu_reg->mode_value_halt;
2751 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2752 	REG_WR_IND(sc, cpu_reg->mode, val);
2753 }
2754 
2755 
2756 /****************************************************************************/
2757 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2758 /*                                                                          */
2759 /* Loads the firmware for each CPU and starts the CPU.                      */
2760 /*                                                                          */
2761 /* Returns:                                                                 */
2762 /*   Nothing.                                                               */
2763 /****************************************************************************/
2764 static void
2765 bce_init_cpus(struct bce_softc *sc)
2766 {
2767 	struct cpu_reg cpu_reg;
2768 	struct fw_info fw;
2769 
2770 	/* Initialize the RV2P processor. */
2771 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2772 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2773 
2774 	/* Initialize the RX Processor. */
2775 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2776 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2777 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2778 	cpu_reg.state = BCE_RXP_CPU_STATE;
2779 	cpu_reg.state_value_clear = 0xffffff;
2780 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2781 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2782 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2783 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2784 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2785 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2786 	cpu_reg.mips_view_base = 0x8000000;
2787 
2788 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2789 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2790 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2791 	fw.start_addr = bce_RXP_b06FwStartAddr;
2792 
2793 	fw.text_addr = bce_RXP_b06FwTextAddr;
2794 	fw.text_len = bce_RXP_b06FwTextLen;
2795 	fw.text_index = 0;
2796 	fw.text = bce_RXP_b06FwText;
2797 
2798 	fw.data_addr = bce_RXP_b06FwDataAddr;
2799 	fw.data_len = bce_RXP_b06FwDataLen;
2800 	fw.data_index = 0;
2801 	fw.data = bce_RXP_b06FwData;
2802 
2803 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2804 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2805 	fw.sbss_index = 0;
2806 	fw.sbss = bce_RXP_b06FwSbss;
2807 
2808 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2809 	fw.bss_len = bce_RXP_b06FwBssLen;
2810 	fw.bss_index = 0;
2811 	fw.bss = bce_RXP_b06FwBss;
2812 
2813 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2814 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2815 	fw.rodata_index = 0;
2816 	fw.rodata = bce_RXP_b06FwRodata;
2817 
2818 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2819 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2820 
2821 	/* Initialize the TX Processor. */
2822 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2823 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2824 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2825 	cpu_reg.state = BCE_TXP_CPU_STATE;
2826 	cpu_reg.state_value_clear = 0xffffff;
2827 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2828 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2829 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2830 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2831 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2832 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2833 	cpu_reg.mips_view_base = 0x8000000;
2834 
2835 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2836 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2837 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2838 	fw.start_addr = bce_TXP_b06FwStartAddr;
2839 
2840 	fw.text_addr = bce_TXP_b06FwTextAddr;
2841 	fw.text_len = bce_TXP_b06FwTextLen;
2842 	fw.text_index = 0;
2843 	fw.text = bce_TXP_b06FwText;
2844 
2845 	fw.data_addr = bce_TXP_b06FwDataAddr;
2846 	fw.data_len = bce_TXP_b06FwDataLen;
2847 	fw.data_index = 0;
2848 	fw.data = bce_TXP_b06FwData;
2849 
2850 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2851 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2852 	fw.sbss_index = 0;
2853 	fw.sbss = bce_TXP_b06FwSbss;
2854 
2855 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2856 	fw.bss_len = bce_TXP_b06FwBssLen;
2857 	fw.bss_index = 0;
2858 	fw.bss = bce_TXP_b06FwBss;
2859 
2860 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2861 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2862 	fw.rodata_index = 0;
2863 	fw.rodata = bce_TXP_b06FwRodata;
2864 
2865 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2866 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2867 
2868 	/* Initialize the TX Patch-up Processor. */
2869 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2870 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2871 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2872 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2873 	cpu_reg.state_value_clear = 0xffffff;
2874 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2875 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2876 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2877 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2878 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2879 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2880 	cpu_reg.mips_view_base = 0x8000000;
2881 
2882 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2883 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2884 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2885 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2886 
2887 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2888 	fw.text_len = bce_TPAT_b06FwTextLen;
2889 	fw.text_index = 0;
2890 	fw.text = bce_TPAT_b06FwText;
2891 
2892 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2893 	fw.data_len = bce_TPAT_b06FwDataLen;
2894 	fw.data_index = 0;
2895 	fw.data = bce_TPAT_b06FwData;
2896 
2897 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2898 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2899 	fw.sbss_index = 0;
2900 	fw.sbss = bce_TPAT_b06FwSbss;
2901 
2902 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2903 	fw.bss_len = bce_TPAT_b06FwBssLen;
2904 	fw.bss_index = 0;
2905 	fw.bss = bce_TPAT_b06FwBss;
2906 
2907 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2908 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2909 	fw.rodata_index = 0;
2910 	fw.rodata = bce_TPAT_b06FwRodata;
2911 
2912 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2913 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2914 
2915 	/* Initialize the Completion Processor. */
2916 	cpu_reg.mode = BCE_COM_CPU_MODE;
2917 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2918 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2919 	cpu_reg.state = BCE_COM_CPU_STATE;
2920 	cpu_reg.state_value_clear = 0xffffff;
2921 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2922 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2923 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2924 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2925 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2926 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2927 	cpu_reg.mips_view_base = 0x8000000;
2928 
2929 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2930 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2931 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2932 	fw.start_addr = bce_COM_b06FwStartAddr;
2933 
2934 	fw.text_addr = bce_COM_b06FwTextAddr;
2935 	fw.text_len = bce_COM_b06FwTextLen;
2936 	fw.text_index = 0;
2937 	fw.text = bce_COM_b06FwText;
2938 
2939 	fw.data_addr = bce_COM_b06FwDataAddr;
2940 	fw.data_len = bce_COM_b06FwDataLen;
2941 	fw.data_index = 0;
2942 	fw.data = bce_COM_b06FwData;
2943 
2944 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2945 	fw.sbss_len = bce_COM_b06FwSbssLen;
2946 	fw.sbss_index = 0;
2947 	fw.sbss = bce_COM_b06FwSbss;
2948 
2949 	fw.bss_addr = bce_COM_b06FwBssAddr;
2950 	fw.bss_len = bce_COM_b06FwBssLen;
2951 	fw.bss_index = 0;
2952 	fw.bss = bce_COM_b06FwBss;
2953 
2954 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2955 	fw.rodata_len = bce_COM_b06FwRodataLen;
2956 	fw.rodata_index = 0;
2957 	fw.rodata = bce_COM_b06FwRodata;
2958 
2959 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2960 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2961 }
2962 
2963 
2964 /****************************************************************************/
2965 /* Initialize context memory.                                               */
2966 /*                                                                          */
2967 /* Clears the memory associated with each Context ID (CID).                 */
2968 /*                                                                          */
2969 /* Returns:                                                                 */
2970 /*   Nothing.                                                               */
2971 /****************************************************************************/
2972 static void
2973 bce_init_context(struct bce_softc *sc)
2974 {
2975 	u32 vcid;
2976 
2977 	vcid = 96;
2978 	while (vcid) {
2979 		u32 vcid_addr, pcid_addr, offset;
2980 
2981 		vcid--;
2982 
2983    		vcid_addr = GET_CID_ADDR(vcid);
2984 		pcid_addr = vcid_addr;
2985 
2986 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
2987 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2988 
2989 		/* Zero out the context. */
2990 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
2991 			CTX_WR(sc, 0x00, offset, 0);
2992 		}
2993 
2994 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2995 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2996 	}
2997 }
2998 
2999 
3000 /****************************************************************************/
3001 /* Fetch the permanent MAC address of the controller.                       */
3002 /*                                                                          */
3003 /* Returns:                                                                 */
3004 /*   Nothing.                                                               */
3005 /****************************************************************************/
3006 static void
3007 bce_get_mac_addr(struct bce_softc *sc)
3008 {
3009 	u32 mac_lo = 0, mac_hi = 0;
3010 
3011 	/*
3012 	 * The NetXtreme II bootcode populates various NIC
3013 	 * power-on and runtime configuration items in a
3014 	 * shared memory area.  The factory configured MAC
3015 	 * address is available from both NVRAM and the
3016 	 * shared memory area so we'll read the value from
3017 	 * shared memory for speed.
3018 	 */
3019 
3020 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3021 		BCE_PORT_HW_CFG_MAC_UPPER);
3022 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3023 		BCE_PORT_HW_CFG_MAC_LOWER);
3024 
3025 	if ((mac_lo == 0) && (mac_hi == 0)) {
3026 		BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3027 			__FILE__, __LINE__);
3028 	} else {
3029 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3030 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3031 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3032 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3033 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3034 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3035 	}
3036 
3037 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3038 }
3039 
3040 
3041 /****************************************************************************/
3042 /* Program the MAC address.                                                 */
3043 /*                                                                          */
3044 /* Returns:                                                                 */
3045 /*   Nothing.                                                               */
3046 /****************************************************************************/
3047 static void
3048 bce_set_mac_addr(struct bce_softc *sc)
3049 {
3050 	u32 val;
3051 	u8 *mac_addr = sc->eaddr;
3052 
3053 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3054 
3055 	val = (mac_addr[0] << 8) | mac_addr[1];
3056 
3057 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3058 
3059 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3060 		(mac_addr[4] << 8) | mac_addr[5];
3061 
3062 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3063 }
3064 
3065 
3066 /****************************************************************************/
3067 /* Stop the controller.                                                     */
3068 /*                                                                          */
3069 /* Returns:                                                                 */
3070 /*   Nothing.                                                               */
3071 /****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Caller must hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* NOTE(review): the check below assumes mii may be NULL when called
	 * from bce_detach() -- confirm device_get_softc() returns NULL there. */
	mii = device_get_softc(sc->bce_miibus);

	/* Cancel the periodic callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	/* The read-back flushes the posted write before the delay. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */

	/* Temporarily force IFF_UP so mii_mediachg() applies IFM_NONE. */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	/*
	 * If we are called from bce_detach(), mii is already NULL.
	 */
	if (mii != NULL) {
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		/* Restore the saved media selection for the next bring-up. */
		ifm->ifm_media = mtmp;
	}

	/* Restore the original interface flags and clear the watchdog. */
	ifp->if_flags = itmp;
	ifp->if_timer = 0;

	sc->bce_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* NOTE(review): presumably keeps management firmware functional
	 * while the interface is down -- confirm against bce_mgmt_init_locked(). */
	bce_mgmt_init_locked(sc);
}
3136 
3137 
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Coordinates with the bootcode via bce_fw_sync(), performs a core chip    */
/* reset, verifies byte swapping, and waits for the bootcode to finish      */
/* re-initializing.                                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Allow the reset up to ~100us (10 polls, 10us apart) to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BCE_PRINTF(sc, "%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bce_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	/* The diag register must read back as a fixed pattern. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bce_reset_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3218 
3219 
3220 static int
3221 bce_chipinit(struct bce_softc *sc)
3222 {
3223 	u32 val;
3224 	int rc = 0;
3225 
3226 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3227 
3228 	/* Make sure the interrupt is not active. */
3229 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3230 
3231 	/* Initialize DMA byte/word swapping, configure the number of DMA  */
3232 	/* channels and PCI clock compensation delay.                      */
3233 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3234 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3235 #if BYTE_ORDER == BIG_ENDIAN
3236 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3237 #endif
3238 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3239 	      DMA_READ_CHANS << 12 |
3240 	      DMA_WRITE_CHANS << 16;
3241 
3242 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3243 
3244 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3245 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3246 
3247 	/*
3248 	 * This setting resolves a problem observed on certain Intel PCI
3249 	 * chipsets that cannot handle multiple outstanding DMA operations.
3250 	 * See errata E9_5706A1_65.
3251 	 */
3252 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3253 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3254 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3255 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3256 
3257 	REG_WR(sc, BCE_DMA_CONFIG, val);
3258 
3259 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3260 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3261 		u16 val;
3262 
3263 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3264 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3265 	}
3266 
3267 	/* Enable the RX_V2P and Context state machines before access. */
3268 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3269 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3270 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3271 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3272 
3273 	/* Initialize context mapping and zero out the quick contexts. */
3274 	bce_init_context(sc);
3275 
3276 	/* Initialize the on-boards CPUs */
3277 	bce_init_cpus(sc);
3278 
3279 	/* Prepare NVRAM for access. */
3280 	if (bce_init_nvram(sc)) {
3281 		rc = ENODEV;
3282 		goto bce_chipinit_exit;
3283 	}
3284 
3285 	/* Set the kernel bypass block size */
3286 	val = REG_RD(sc, BCE_MQ_CONFIG);
3287 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3288 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3289 	REG_WR(sc, BCE_MQ_CONFIG, val);
3290 
3291 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3292 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3293 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3294 
3295 	val = (BCM_PAGE_BITS - 8) << 24;
3296 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3297 
3298 	/* Configure page size. */
3299 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3300 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3301 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3302 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3303 
3304 bce_chipinit_exit:
3305 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3306 
3307 	return(rc);
3308 }
3309 
3310 
3311 /****************************************************************************/
3312 /* Initialize the controller in preparation to send/receive traffic.        */
3313 /*                                                                          */
3314 /* Returns:                                                                 */
3315 /*   0 for success, positive value for failure.                             */
3316 /****************************************************************************/
3317 static int
3318 bce_blockinit(struct bce_softc *sc)
3319 {
3320 	u32 reg, val;
3321 	int rc = 0;
3322 
3323 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3324 
3325 	/* Load the hardware default MAC address. */
3326 	bce_set_mac_addr(sc);
3327 
3328 	/* Set the Ethernet backoff seed value */
3329 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3330 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3331 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3332 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3333 
3334 	sc->last_status_idx = 0;
3335 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3336 
3337 	/* Set up link change interrupt generation. */
3338 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3339 
3340 	/* Program the physical address of the status block. */
3341 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3342 		BCE_ADDR_LO(sc->status_block_paddr));
3343 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3344 		BCE_ADDR_HI(sc->status_block_paddr));
3345 
3346 	/* Program the physical address of the statistics block. */
3347 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3348 		BCE_ADDR_LO(sc->stats_block_paddr));
3349 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3350 		BCE_ADDR_HI(sc->stats_block_paddr));
3351 
3352 	/* Program various host coalescing parameters. */
3353 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3354 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3355 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3356 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3357 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3358 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3359 	REG_WR(sc, BCE_HC_TX_TICKS,
3360 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3361 	REG_WR(sc, BCE_HC_RX_TICKS,
3362 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3363 	REG_WR(sc, BCE_HC_COM_TICKS,
3364 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3365 	REG_WR(sc, BCE_HC_CMD_TICKS,
3366 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3367 	REG_WR(sc, BCE_HC_STATS_TICKS,
3368 		(sc->bce_stats_ticks & 0xffff00));
3369 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3370 		0xbb8);  /* 3ms */
3371 	REG_WR(sc, BCE_HC_CONFIG,
3372 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3373 		BCE_HC_CONFIG_COLLECT_STATS));
3374 
3375 	/* Clear the internal statistics counters. */
3376 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3377 
3378 	/* Verify that bootcode is running. */
3379 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3380 
3381 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3382 		BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3383 			__FILE__, __LINE__);
3384 		reg = 0);
3385 
3386 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3387 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3388 		BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3389 			"Expected: 08%08X\n", __FILE__, __LINE__,
3390 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3391 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3392 		rc = ENODEV;
3393 		goto bce_blockinit_exit;
3394 	}
3395 
3396 	/* Check if any management firmware is running. */
3397 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3398 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3399 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3400 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3401 	}
3402 
3403 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3404 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3405 
3406 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3407 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3408 
3409 	/* Enable link state change interrupt generation. */
3410 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3411 
3412 	/* Enable all remaining blocks in the MAC. */
3413 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3414 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3415 	DELAY(20);
3416 
3417 bce_blockinit_exit:
3418 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3419 
3420 	return (rc);
3421 }
3422 
3423 
/****************************************************************************/
/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
/*                                                                          */
/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
/* necessary.                                                               */
/*                                                                          */
/* When m == NULL a fresh mbuf cluster is allocated; otherwise the          */
/* caller's mbuf is recycled (its data pointer is reset to the start of     */
/* the cluster).  On any failure the mbuf is freed here, not by the         */
/* caller.  *prod, *chain_prod and *prod_bseq are advanced in place to      */
/* cover every rx_bd consumed.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value (ENOBUFS) for failure.                   */
/****************************************************************************/
static int
bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
	u32 *prod_bseq)
{
	bus_dmamap_t		map;
	/* Worst-case scatter/gather list for a single mbuf cluster. */
	bus_dma_segment_t	segs[4];
	struct mbuf *m_new = NULL;
	struct rx_bd		*rxbd;
	int i, nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	/* Snapshot of the starting index for the debug dump at the end. */
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	if (m == NULL) {

		/* Debug builds can randomly simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
				__FILE__, __LINE__);
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
				__FILE__, __LINE__);

			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);
		/* Attach a cluster large enough for the configured MTU. */
		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
		if (!(m_new->m_flags & M_EXT)) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
				__FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
	} else {
		/* Recycle the caller's mbuf: reset length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	if (error) {
		BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
			__FILE__, __LINE__);

		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_buf_exit;
	}

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n",
			__FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
	*prod_bseq += segs[0].ds_len;

	/* Chain any additional DMA segments into following rx_bd's. */
	for (i = 1; i < nsegs; i++) {

		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		rxbd->rx_bd_len       = htole32(segs[i].ds_len);
		rxbd->rx_bd_flags     = 0;
		*prod_bseq += segs[i].ds_len;
	}

	/* Mark whichever rx_bd came last as the end of the packet buffer. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
3569 
3570 
3571 /****************************************************************************/
3572 /* Allocate memory and initialize the TX data structures.                   */
3573 /*                                                                          */
3574 /* Returns:                                                                 */
3575 /*   0 for success, positive value for failure.                             */
3576 /****************************************************************************/
3577 static int
3578 bce_init_tx_chain(struct bce_softc *sc)
3579 {
3580 	struct tx_bd *txbd;
3581 	u32 val;
3582 	int i, rc = 0;
3583 
3584 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3585 
3586 	/* Set the initial TX producer/consumer indices. */
3587 	sc->tx_prod        = 0;
3588 	sc->tx_cons        = 0;
3589 	sc->tx_prod_bseq   = 0;
3590 	sc->used_tx_bd = 0;
3591 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3592 
3593 	/*
3594 	 * The NetXtreme II supports a linked-list structre called
3595 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3596 	 * consists of a series of 1 or more chain pages, each of which
3597 	 * consists of a fixed number of BD entries.
3598 	 * The last BD entry on each page is a pointer to the next page
3599 	 * in the chain, and the last pointer in the BD chain
3600 	 * points back to the beginning of the chain.
3601 	 */
3602 
3603 	/* Set the TX next pointer chain entries. */
3604 	for (i = 0; i < TX_PAGES; i++) {
3605 		int j;
3606 
3607 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3608 
3609 		/* Check if we've reached the last page. */
3610 		if (i == (TX_PAGES - 1))
3611 			j = 0;
3612 		else
3613 			j = i + 1;
3614 
3615 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3616 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3617 	}
3618 
3619 	/*
3620 	 * Initialize the context ID for an L2 TX chain.
3621 	 */
3622 	val = BCE_L2CTX_TYPE_TYPE_L2;
3623 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3624 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3625 
3626 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3627 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3628 
3629 	/* Point the hardware to the first page in the chain. */
3630 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3631 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3632 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3633 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3634 
3635 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3636 
3637 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3638 
3639 	return(rc);
3640 }
3641 
3642 
3643 /****************************************************************************/
3644 /* Free memory and clear the TX data structures.                            */
3645 /*                                                                          */
3646 /* Returns:                                                                 */
3647 /*   Nothing.                                                               */
3648 /****************************************************************************/
3649 static void
3650 bce_free_tx_chain(struct bce_softc *sc)
3651 {
3652 	int i;
3653 
3654 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3655 
3656 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3657 	for (i = 0; i < TOTAL_TX_BD; i++) {
3658 		if (sc->tx_mbuf_ptr[i] != NULL) {
3659 			if (sc->tx_mbuf_map != NULL)
3660 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3661 					BUS_DMASYNC_POSTWRITE);
3662 			m_freem(sc->tx_mbuf_ptr[i]);
3663 			sc->tx_mbuf_ptr[i] = NULL;
3664 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3665 		}
3666 	}
3667 
3668 	/* Clear each TX chain page. */
3669 	for (i = 0; i < TX_PAGES; i++)
3670 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3671 
3672 	/* Check if we lost any mbufs in the process. */
3673 	DBRUNIF((sc->tx_mbuf_alloc),
3674 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3675 			"from tx chain!\n",
3676 			__FILE__, __LINE__, sc->tx_mbuf_alloc));
3677 
3678 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3679 }
3680 
3681 
3682 /****************************************************************************/
3683 /* Allocate memory and initialize the RX data structures.                   */
3684 /*                                                                          */
3685 /* Returns:                                                                 */
3686 /*   0 for success, positive value for failure.                             */
3687 /****************************************************************************/
3688 static int
3689 bce_init_rx_chain(struct bce_softc *sc)
3690 {
3691 	struct rx_bd *rxbd;
3692 	int i, rc = 0;
3693 	u16 prod, chain_prod;
3694 	u32 prod_bseq, val;
3695 
3696 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3697 
3698 	/* Initialize the RX producer and consumer indices. */
3699 	sc->rx_prod        = 0;
3700 	sc->rx_cons        = 0;
3701 	sc->rx_prod_bseq   = 0;
3702 	sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
3703 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3704 
3705 	/* Initialize the RX next pointer chain entries. */
3706 	for (i = 0; i < RX_PAGES; i++) {
3707 		int j;
3708 
3709 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3710 
3711 		/* Check if we've reached the last page. */
3712 		if (i == (RX_PAGES - 1))
3713 			j = 0;
3714 		else
3715 			j = i + 1;
3716 
3717 		/* Setup the chain page pointers. */
3718 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3719 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3720 	}
3721 
3722 	/* Initialize the context ID for an L2 RX chain. */
3723 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3724 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3725 	val |= 0x02 << 8;
3726 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3727 
3728 	/* Point the hardware to the first page in the chain. */
3729 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3730 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3731 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3732 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3733 
3734 	/* Allocate mbuf clusters for the rx_bd chain. */
3735 	prod = prod_bseq = 0;
3736 	while (prod < BCE_RX_SLACK_SPACE) {
3737 		chain_prod = RX_CHAIN_IDX(prod);
3738 		if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3739 			BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
3740 				__FILE__, __LINE__, chain_prod);
3741 			rc = ENOBUFS;
3742 			break;
3743 		}
3744 		prod = NEXT_RX_BD(prod);
3745 	}
3746 
3747 	/* Save the RX chain producer index. */
3748 	sc->rx_prod      = prod;
3749 	sc->rx_prod_bseq = prod_bseq;
3750 
3751 	for (i = 0; i < RX_PAGES; i++) {
3752 		bus_dmamap_sync(
3753 			sc->rx_bd_chain_tag,
3754 	    	sc->rx_bd_chain_map[i],
3755 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3756 	}
3757 
3758 	/* Tell the chip about the waiting rx_bd's. */
3759 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3760 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3761 
3762 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3763 
3764 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3765 
3766 	return(rc);
3767 }
3768 
3769 
3770 /****************************************************************************/
3771 /* Free memory and clear the RX data structures.                            */
3772 /*                                                                          */
3773 /* Returns:                                                                 */
3774 /*   Nothing.                                                               */
3775 /****************************************************************************/
3776 static void
3777 bce_free_rx_chain(struct bce_softc *sc)
3778 {
3779 	int i;
3780 
3781 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3782 
3783 	/* Free any mbufs still in the RX mbuf chain. */
3784 	for (i = 0; i < TOTAL_RX_BD; i++) {
3785 		if (sc->rx_mbuf_ptr[i] != NULL) {
3786 			if (sc->rx_mbuf_map[i] != NULL)
3787 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3788 					BUS_DMASYNC_POSTREAD);
3789 			m_freem(sc->rx_mbuf_ptr[i]);
3790 			sc->rx_mbuf_ptr[i] = NULL;
3791 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3792 		}
3793 	}
3794 
3795 	/* Clear each RX chain page. */
3796 	for (i = 0; i < RX_PAGES; i++)
3797 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3798 
3799 	/* Check if we lost any mbufs in the process. */
3800 	DBRUNIF((sc->rx_mbuf_alloc),
3801 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3802 			__FILE__, __LINE__, sc->rx_mbuf_alloc));
3803 
3804 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3805 }
3806 
3807 
3808 /****************************************************************************/
3809 /* Set media options.                                                       */
3810 /*                                                                          */
3811 /* Returns:                                                                 */
3812 /*   0 for success, positive value for failure.                             */
3813 /****************************************************************************/
3814 static int
3815 bce_ifmedia_upd(struct ifnet *ifp)
3816 {
3817 	struct bce_softc *sc;
3818 	struct mii_data *mii;
3819 	struct ifmedia *ifm;
3820 	int rc = 0;
3821 
3822 	sc = ifp->if_softc;
3823 	ifm = &sc->bce_ifmedia;
3824 
3825 	/* DRC - ToDo: Add SerDes support. */
3826 
3827 	mii = device_get_softc(sc->bce_miibus);
3828 	sc->bce_link = 0;
3829 	if (mii->mii_instance) {
3830 		struct mii_softc *miisc;
3831 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3832 		    miisc = LIST_NEXT(miisc, mii_list))
3833 			mii_phy_reset(miisc);
3834 	}
3835 	mii_mediachg(mii);
3836 
3837 	return(rc);
3838 }
3839 
3840 
3841 /****************************************************************************/
3842 /* Reports current media status.                                            */
3843 /*                                                                          */
3844 /* Returns:                                                                 */
3845 /*   Nothing.                                                               */
3846 /****************************************************************************/
3847 static void
3848 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3849 {
3850 	struct bce_softc *sc;
3851 	struct mii_data *mii;
3852 
3853 	sc = ifp->if_softc;
3854 
3855 	BCE_LOCK(sc);
3856 
3857 	mii = device_get_softc(sc->bce_miibus);
3858 
3859 	/* DRC - ToDo: Add SerDes support. */
3860 
3861 	mii_pollstat(mii);
3862 	ifmr->ifm_active = mii->mii_media_active;
3863 	ifmr->ifm_status = mii->mii_media_status;
3864 
3865 	BCE_UNLOCK(sc);
3866 }
3867 
3868 
3869 /****************************************************************************/
3870 /* Handles PHY generated interrupt events.                                  */
3871 /*                                                                          */
3872 /* Returns:                                                                 */
3873 /*   Nothing.                                                               */
3874 /****************************************************************************/
3875 static void
3876 bce_phy_intr(struct bce_softc *sc)
3877 {
3878 	u32 new_link_state, old_link_state;
3879 
3880 	new_link_state = sc->status_block->status_attn_bits &
3881 		STATUS_ATTN_BITS_LINK_STATE;
3882 	old_link_state = sc->status_block->status_attn_bits_ack &
3883 		STATUS_ATTN_BITS_LINK_STATE;
3884 
3885 	/* Handle any changes if the link state has changed. */
3886 	if (new_link_state != old_link_state) {
3887 
3888 		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3889 
3890 		sc->bce_link = 0;
3891 		callout_stop(&sc->bce_stat_ch);
3892 		bce_tick_locked(sc);
3893 
3894 		/* Update the status_attn_bits_ack field in the status block. */
3895 		if (new_link_state) {
3896 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3897 				STATUS_ATTN_BITS_LINK_STATE);
3898 			DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
3899 		}
3900 		else {
3901 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3902 				STATUS_ATTN_BITS_LINK_STATE);
3903 			DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
3904 		}
3905 
3906 	}
3907 
3908 	/* Acknowledge the link change interrupt. */
3909 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3910 }
3911 
3912 
/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Walks the RX BD chain from the driver's consumer index up to the        */
/* hardware's, validating each completed frame, replenishing the chain     */
/* with fresh mbufs, and handing good frames to the network stack.         */
/* Called with the driver lock held (it is dropped around if_input).       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	u32 sw_prod_bseq;
	struct l2_fhdr *l2fhdr;

	DBRUNIF(1, sc->rx_interrupts++);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip past the chain-page-pointer entry at the end of each page. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
		__FUNCTION__, sw_prod, sw_cons,
		sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u32 status;

		/* Convert the producer/consumer indices to an actual rx_bd index. */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_rxbd(sc, sw_chain_cons, rxbd));

#ifdef DEVICE_POLLING
		/* In polling mode, stop once the cycle budget is spent. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->bce_rxcycles <= 0)
				break;
			sc->bce_rxcycles--;
		}
#endif

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {

			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_chain_cons);
				bce_breakpoint(sc));

			/* DRC - ToDo: If the received packet is small, say less */
			/*             than 128 bytes, allocate a new mbuf here, */
			/*             copy the data to that mbuf, and recycle   */
			/*             the mapped jumbo frame.                   */

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len    = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Debug builds can randomly inject a status error. */
			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
				BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
				status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
				BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
					"Min(%d), Actual(%d), Max(%d)\n",
					__FILE__, __LINE__, (int) BCE_MIN_MTU,
					len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
				bce_dump_mbuf(sc, m);
		 		bce_breakpoint(sc));

			/* The stack never sees the CRC. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
				L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
				L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {

				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

				DBRUN(BCE_WARN,
					BCE_PRINTF(sc, "%s(%d): Failed to allocate "
					"new mbuf, incoming frame dropped!\n",
					__FILE__, __LINE__));

				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/* Skip over the l2_fhdr when passing the data up the stack. */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
				struct ether_header *eh;
				eh = mtod(m, struct ether_header *);
				BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
					__FUNCTION__, eh->ether_dhost, ":",
					eh->ether_shost, ":", htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {

				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
					else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid IP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
						      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
							| CSUM_PSEUDO_HDR);
					} else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}


			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
					__FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
#if __FreeBSD_version < 700000
				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
#else
				m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
				m->m_flags |= M_VLANTAG;
#endif
			}

			/* Pass the mbuf off to the upper layers. */
			ifp->if_ipackets++;
			DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
				__FUNCTION__);
			/* Drop the lock across if_input to avoid recursion. */
			BCE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);
			BCE_LOCK(sc);

bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Hand the refilled chain pages back to the device. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

	/* Commit the working indices back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Notify the chip of the new producer index and byte sequence. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4192 
4193 
/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Reclaims tx_bd's completed by the hardware, unloading DMA maps and      */
/* freeing mbufs stored with the last tx_bd of each packet.  Clears the    */
/* OACTIVE flag when enough descriptors become available.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	BCE_LOCK_ASSERT(sc);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
				" 0x%04X > 0x%04X\n",
				__FILE__, __LINE__, sw_tx_chain_cons,
				(int) MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1,
			txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				[TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
				"txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
				BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
			BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n",
				__FUNCTION__, sc->used_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}
4311 
4312 
4313 /****************************************************************************/
4314 /* Disables interrupt generation.                                           */
4315 /*                                                                          */
4316 /* Returns:                                                                 */
4317 /*   Nothing.                                                               */
4318 /****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/*
	 * Set the mask bit in the PCI config space interrupt
	 * acknowledge command register to stop further interrupt
	 * generation, then read the register back to flush the
	 * posted write out to the controller.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
}
4326 
4327 
4328 /****************************************************************************/
4329 /* Enables interrupt generation.                                            */
4330 /*                                                                          */
4331 /* Returns:                                                                 */
4332 /*   Nothing.                                                               */
4333 /****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	u32 val;

	/*
	 * Write the last status block index back first with the
	 * interrupt still masked, then again with the mask cleared,
	 * re-arming interrupt generation at the current index.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/*
	 * Request an immediate host coalescing pass so any events that
	 * accumulated while interrupts were masked are delivered now
	 * instead of waiting for the next coalescing interval.
	 */
	val = REG_RD(sc, BCE_HC_COMMAND);
	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
}
4349 
4350 
4351 /****************************************************************************/
4352 /* Handles controller initialization.                                       */
4353 /*                                                                          */
4354 /* Must be called from a locked routine.                                    */
4355 /*                                                                          */
4356 /* Returns:                                                                 */
4357 /*   Nothing.                                                               */
4358 /****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	/* Quiesce the controller before reprogramming it. */
	bce_stop(sc);

	/*
	 * Full bring-up sequence: chip reset, then core chip setup,
	 * then per-block initialization.  Any failure aborts the init
	 * and leaves IFF_DRV_RUNNING clear.
	 */
	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/*
	 * Calculate and program the Ethernet MTU size: header plus an
	 * optional VLAN tag, the configured payload MTU, and the CRC.
	 */
	ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
		ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu |
			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/*
	 * Calculate the RX Ethernet frame size for rx_bd's.
	 * NOTE(review): the "+ 2" and "+ 8" appear to account for
	 * alignment padding and trailing slack after the l2_fhdr --
	 * confirm against the frame header layout.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		bce_disable_intr(sc);

		/* Force maximum coalescing (count in the upper 16 bits). */
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	/* Kick off autonegotiation/media selection. */
	bce_ifmedia_upd(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic statistics/link tick. */
	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);

bce_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4462 
/*
 * Minimal initialization so management firmware (ASF/IPMI/UMP) keeps
 * working while the interface itself is down: load the on-chip CPUs
 * and enable just the RX path blocks it needs.
 */
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	u32 val;
	struct ifnet *ifp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_mgmt_init_locked_exit;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/*
	 * Program the RV2P processor's page size.
	 * NOTE(review): (BCM_PAGE_BITS - 8) << 24 encodes the host page
	 * size into the config register -- confirm against chip docs.
	 */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Flush the posted write, then let the blocks come up. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
bce_mgmt_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4499 
4500 
4501 /****************************************************************************/
4502 /* Handles controller initialization when called from an unlocked routine.  */
4503 /*                                                                          */
4504 /* Returns:                                                                 */
4505 /*   Nothing.                                                               */
4506 /****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc;

	/* Acquire the driver lock and hand off to the locked version. */
	sc = xsc;
	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);
}
4516 
4517 
4518 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4520 /* memory visible to the controller.                                        */
4521 /*                                                                          */
4522 /* Returns:                                                                 */
4523 /*   0 for success, positive value for failure.                             */
4524 /****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	u16 vlan_tag = 0, flags = 0;
	u16 chain_prod, prod;
	u32 prod_bseq;

#ifdef BCE_DEBUG
	u16 debug_prod;
#endif
	int i, error, nsegs, rc = 0;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	/*
	 * NOTE(review): the DMA map is taken from the FIRST chain index
	 * of this frame, but the mbuf pointer is stored at the LAST index
	 * below and bce_tx_intr() unloads tx_mbuf_map[] at that last
	 * index.  For multi-segment frames these are different maps --
	 * verify whether the map pointers should be swapped here.
	 */
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {

		/* Try to defrag the mbuf if there are too many segments. */
	        DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
                    __FUNCTION__, nsegs);

                m0 = m_defrag(*m_head, M_DONTWAIT);
                if (m0 == NULL) {
			/* Defrag failed; drop the frame. */
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
		    segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* Transient shortage; caller keeps the mbuf and retries. */
			return (error);
		} else if (error != 0) {
			BCE_PRINTF(sc,
			    "%s(%d): Error mapping mbuf into TX chain!\n",
			    __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error == ENOMEM) {
		return (error);
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		return (error);
	}

	/*
	 * The chip seems to require that at least 16 descriptors be kept
	 * empty at all times.  Make sure we honor that.
	 * XXX Would it be faster to assume worst case scenario for nsegs
	 * and do this calculation higher up?
	 */
	if (nsegs > (USABLE_TX_BD - sc->used_tx_bd - BCE_TX_SLACK_SPACE)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		return (ENOBUFS);
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq  = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Fill in the descriptor in little-endian (chip) byte order. */
		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		/* Running byte sequence the chip uses to track progress. */
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Debug stat: track the high watermark of ring usage. */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return(rc);
}
4681 
4682 
4683 /****************************************************************************/
4684 /* Main transmit routine when called from another routine with a lock.      */
4685 /*                                                                          */
4686 /* Returns:                                                                 */
4687 /*   Nothing.                                                               */
4688 /****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.  We keep
	 * BCE_TX_SLACK_SPACE entries unused at all times.
	 */
	while (sc->used_tx_bd < USABLE_TX_BD - BCE_TX_SLACK_SPACE) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			/* m_head may be NULL here if encap freed the mbuf. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
				"TX chain is closed for business! Total tx_bd used = %d\n",
				sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Start the transmit by writing the new producer index and byte
	 * sequence to the chip's TX mailbox.
	 */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BCE_TX_TIMEOUT;

bce_start_locked_exit:
	return;
}
4770 
4771 
4772 /****************************************************************************/
4773 /* Main transmit routine when called from another routine without a lock.   */
4774 /*                                                                          */
4775 /* Returns:                                                                 */
4776 /*   Nothing.                                                               */
4777 /****************************************************************************/
4778 static void
4779 bce_start(struct ifnet *ifp)
4780 {
4781 	struct bce_softc *sc = ifp->if_softc;
4782 
4783 	BCE_LOCK(sc);
4784 	bce_start_locked(ifp);
4785 	BCE_UNLOCK(sc);
4786 }
4787 
4788 
4789 /****************************************************************************/
4790 /* Handles any IOCTL calls from the operating system.                       */
4791 /*                                                                          */
4792 /* Returns:                                                                 */
4793 /*   0 for success, positive value for failure.                             */
4794 /****************************************************************************/
4795 static int
4796 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4797 {
4798 	struct bce_softc *sc = ifp->if_softc;
4799 	struct ifreq *ifr = (struct ifreq *) data;
4800 	struct mii_data *mii;
4801 	int mask, error = 0;
4802 
4803 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4804 
4805 	switch(command) {
4806 
4807 		/* Set the MTU. */
4808 		case SIOCSIFMTU:
4809 			/* Check that the MTU setting is supported. */
4810 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
4811 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4812 				error = EINVAL;
4813 				break;
4814 			}
4815 
4816 			DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4817 
4818 			BCE_LOCK(sc);
4819 			ifp->if_mtu = ifr->ifr_mtu;
4820 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4821 			bce_init_locked(sc);
4822 			BCE_UNLOCK(sc);
4823 			break;
4824 
4825 		/* Set interface. */
4826 		case SIOCSIFFLAGS:
4827 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4828 
4829 			BCE_LOCK(sc);
4830 
4831 			/* Check if the interface is up. */
4832 			if (ifp->if_flags & IFF_UP) {
4833 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4834 					/* Change the promiscuous/multicast flags as necessary. */
4835 					bce_set_rx_mode(sc);
4836 				} else {
4837 					/* Start the HW */
4838 					bce_init_locked(sc);
4839 				}
4840 			} else {
4841 				/* The interface is down.  Check if the driver is running. */
4842 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4843 					bce_stop(sc);
4844 				}
4845 			}
4846 
4847 			BCE_UNLOCK(sc);
4848 			error = 0;
4849 
4850 			break;
4851 
4852 		/* Add/Delete multicast address */
4853 		case SIOCADDMULTI:
4854 		case SIOCDELMULTI:
4855 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4856 
4857 			BCE_LOCK(sc);
4858 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4859 				bce_set_rx_mode(sc);
4860 				error = 0;
4861 			}
4862 			BCE_UNLOCK(sc);
4863 
4864 			break;
4865 
4866 		/* Set/Get Interface media */
4867 		case SIOCSIFMEDIA:
4868 		case SIOCGIFMEDIA:
4869 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4870 
4871 			DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4872 				sc->bce_phy_flags);
4873 
4874 			if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
4875 				DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");
4876 
4877 				error = ifmedia_ioctl(ifp, ifr,
4878 				    &sc->bce_ifmedia, command);
4879 			} else {
4880 				DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4881 				mii = device_get_softc(sc->bce_miibus);
4882 				error = ifmedia_ioctl(ifp, ifr,
4883 				    &mii->mii_media, command);
4884 			}
4885 			break;
4886 
4887 		/* Set interface capability */
4888 		case SIOCSIFCAP:
4889 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4890 			DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4891 
4892 #ifdef DEVICE_POLLING
4893 			if (mask & IFCAP_POLLING) {
4894 				if (ifr->ifr_reqcap & IFCAP_POLLING) {
4895 
4896 					/* Setup the poll routine to call. */
4897 					error = ether_poll_register(bce_poll, ifp);
4898 					if (error) {
4899 						BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4900 							__FILE__, __LINE__);
4901 						goto bce_ioctl_exit;
4902 					}
4903 
4904 					/* Clear the interrupt. */
4905 					BCE_LOCK(sc);
4906 					bce_disable_intr(sc);
4907 
4908 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4909 						(1 << 16) | sc->bce_rx_quick_cons_trip);
4910 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4911 						(1 << 16) | sc->bce_tx_quick_cons_trip);
4912 
4913 					ifp->if_capenable |= IFCAP_POLLING;
4914 					BCE_UNLOCK(sc);
4915 				} else {
4916 					/* Clear the poll routine. */
4917 					error = ether_poll_deregister(ifp);
4918 
4919 					/* Enable interrupt even in error case */
4920 					BCE_LOCK(sc);
4921 					bce_enable_intr(sc);
4922 
4923 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4924 						(sc->bce_tx_quick_cons_trip_int << 16) |
4925 						sc->bce_tx_quick_cons_trip);
4926 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4927 						(sc->bce_rx_quick_cons_trip_int << 16) |
4928 						sc->bce_rx_quick_cons_trip);
4929 
4930 					ifp->if_capenable &= ~IFCAP_POLLING;
4931 					BCE_UNLOCK(sc);
4932 				}
4933 			}
4934 #endif /*DEVICE_POLLING */
4935 
4936 			/* Toggle the TX checksum capabilites enable flag. */
4937 			if (mask & IFCAP_TXCSUM) {
4938 				ifp->if_capenable ^= IFCAP_TXCSUM;
4939 				if (IFCAP_TXCSUM & ifp->if_capenable)
4940 					ifp->if_hwassist = BCE_IF_HWASSIST;
4941 				else
4942 					ifp->if_hwassist = 0;
4943 			}
4944 
4945 			/* Toggle the RX checksum capabilities enable flag. */
4946 			if (mask & IFCAP_RXCSUM) {
4947 				ifp->if_capenable ^= IFCAP_RXCSUM;
4948 				if (IFCAP_RXCSUM & ifp->if_capenable)
4949 					ifp->if_hwassist = BCE_IF_HWASSIST;
4950 				else
4951 					ifp->if_hwassist = 0;
4952 			}
4953 
4954 			/* Toggle VLAN_MTU capabilities enable flag. */
4955 			if (mask & IFCAP_VLAN_MTU) {
4956 				BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
4957 					__FILE__, __LINE__);
4958 			}
4959 
4960 			/* Toggle VLANHWTAG capabilities enabled flag. */
4961 			if (mask & IFCAP_VLAN_HWTAGGING) {
4962 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
4963 					BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
4964 						"management firmware (ASF/IPMI/UMP) is running!\n",
4965 						__FILE__, __LINE__);
4966 				else
4967 					BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
4968 						__FILE__, __LINE__);
4969 			}
4970 
4971 			break;
4972 		default:
4973 			DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
4974 				(u32) command);
4975 
4976 			/* We don't know how to handle the IOCTL, pass it on. */
4977 			error = ether_ioctl(ifp, command, data);
4978 			break;
4979 	}
4980 
4981 #ifdef DEVICE_POLLING
4982 bce_ioctl_exit:
4983 #endif
4984 
4985 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4986 
4987 	return(error);
4988 }
4989 
4990 
4991 /****************************************************************************/
4992 /* Transmit timeout handler.                                                */
4993 /*                                                                          */
4994 /* Returns:                                                                 */
4995 /*   Nothing.                                                               */
4996 /****************************************************************************/
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	/* Dump debug state before the reset destroys it. */
	DBRUN(BCE_WARN_SEND,
		bce_dump_driver_state(sc);
		bce_dump_status_block(sc));

	BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n",
		__FILE__, __LINE__);

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	/*
	 * Clear IFF_DRV_RUNNING so bce_init_locked() performs a full
	 * stop/reset/re-init instead of bailing out early, then count
	 * the lost frame(s) as an output error.
	 */
	BCE_LOCK(sc);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init_locked(sc);
	ifp->if_oerrors++;
	BCE_UNLOCK(sc);

}
5019 
5020 
5021 #ifdef DEVICE_POLLING
static void
bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bce_softc *sc = ifp->if_softc;

	BCE_LOCK_ASSERT(sc);

	/* Budget for bce_rx_intr() on this polling pass. */
	sc->bce_rxcycles = count;

	/* Make the status block DMA'd by the chip visible to the CPU. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * NOTE(review): "cmd" is ignored, so POLL_AND_CHECK_STATUS gets
	 * no extra handling (e.g. link state is not re-checked here) --
	 * confirm this is intentional.
	 */

	/* Check for any completed RX frames. */
	if (sc->status_block->status_rx_quick_consumer_index0 !=
		sc->hw_rx_cons)
		bce_rx_intr(sc);

	/* Check for any completed TX frames. */
	if (sc->status_block->status_tx_quick_consumer_index0 !=
		sc->hw_tx_cons)
		bce_tx_intr(sc);

	/* Check for new frames to transmit. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

}
5049 
5050 
5051 static void
5052 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5053 {
5054 	struct bce_softc *sc = ifp->if_softc;
5055 
5056 	BCE_LOCK(sc);
5057 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5058 		bce_poll_locked(ifp, cmd, count);
5059 	BCE_UNLOCK(sc);
5060 }
5061 #endif /* DEVICE_POLLING */
5062 
5063 
#if 0
/*
 * Check the status block for pending RX/TX completions or a link
 * state change.  Currently compiled out.
 *
 * Returns 1 when there is work for the interrupt handler, 0 otherwise.
 */
static inline int
bce_has_work(struct bce_softc *sc)
{
	struct status_block *stat = sc->status_block;

	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
		return 1;

	/*
	 * Fix: the original referenced "bp->link_up", a leftover from
	 * the Linux bnx2 driver which would not compile here; use this
	 * driver's softc link flag (as bce_start_locked() does).
	 */
	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    sc->bce_link)
		return 1;

	return 0;
}
#endif
5081 
5082 
5083 /*
5084  * Interrupt handler.
5085  */
5086 /****************************************************************************/
5087 /* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
5089 /* interrupt causes (PHY, TX, RX).                                          */
5090 /*                                                                          */
5091 /* Returns:                                                                 */
5092 /*   0 for success, positive value for failure.                             */
5093 /****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;

	sc = xsc;
	ifp = sc->bce_ifp;

	BCE_LOCK(sc);

	DBRUNIF(1, sc->interrupts_generated++);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
		goto bce_intr_exit;
	}
#endif

	/* Make the chip's status block DMA writes visible to the CPU. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		/* Debug hook: randomly fake a parity-error attention. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF(sc, "Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(1, sc->unexpected_attentions++);

			BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUN(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Full re-init is the only recovery; skip re-arming. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag,	sc->status_map,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Re-enable interrupts: write the index back masked first,
	 * then unmasked (same two-step sequence as bce_enable_intr()).
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}
5204 
5205 
5206 /****************************************************************************/
5207 /* Programs the various packet receive modes (broadcast and multicast).     */
5208 /*                                                                          */
5209 /* Returns:                                                                 */
5210 /*   Nothing.                                                               */
5211 /****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[4] = { 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
       	}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");

		/*
		 * Hash each multicast MAC into a 7-bit value: bits 6-5
		 * select one of the four hash registers, bits 4-0 select
		 * the bit within it.
		 */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    	ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
			hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < 4; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}
5290 
5291 
5292 /****************************************************************************/
5293 /* Called periodically to updates statistics from the controllers           */
5294 /* statistics block.                                                        */
5295 /*                                                                          */
5296 /* Returns:                                                                 */
5297 /*   Nothing.                                                               */
5298 /****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	/* The controller DMAs its statistics into this host-memory block. */
	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;

	/* Input errors: runts, giants, controller buffer drops, bad frames. */
	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
				      (u_long) stats->stat_EtherStatsOverrsizePkts +
					  (u_long) stats->stat_IfInMBUFDiscards +
					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
					  (u_long) stats->stat_Dot3StatsFCSErrors;

	/* Output errors: internal MAC and collision failures. */
	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
					  (u_long) stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  64-bit counters arrive as hi/lo
	 * 32-bit register pairs and are recombined here.
	 */
	sc->stat_IfHCInOctets =
		((u64) stats->stat_IfHCInOctets_hi << 32) +
		 (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((u64) stats->stat_IfHCOutOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5514 
5515 
5516 static void
5517 bce_tick_locked(struct bce_softc *sc)
5518 {
5519 	struct mii_data *mii = NULL;
5520 	struct ifnet *ifp;
5521 	u32 msg;
5522 
5523 	ifp = sc->bce_ifp;
5524 
5525 	BCE_LOCK_ASSERT(sc);
5526 
5527 	/* Tell the firmware that the driver is still running. */
5528 #ifdef BCE_DEBUG
5529 	msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5530 #else
5531 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5532 #endif
5533 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5534 
5535 	/* Update the statistics from the hardware statistics block. */
5536 	bce_stats_update(sc);
5537 
5538 	/* Schedule the next tick. */
5539 	callout_reset(
5540 		&sc->bce_stat_ch,		/* callout */
5541 		hz, 					/* ticks */
5542 		bce_tick, 				/* function */
5543 		sc);					/* function argument */
5544 
5545 	/* If link is up already up then we're done. */
5546 	if (sc->bce_link)
5547 		goto bce_tick_locked_exit;
5548 
5549 	/* DRC - ToDo: Add SerDes support and check SerDes link here. */
5550 
5551 	mii = device_get_softc(sc->bce_miibus);
5552 	mii_tick(mii);
5553 
5554 	/* Check if the link has come up. */
5555 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5556 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5557 		sc->bce_link++;
5558 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5559 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5560 		    bootverbose)
5561 			BCE_PRINTF(sc, "Gigabit link up\n");
5562 		/* Now that link is up, handle any outstanding TX traffic. */
5563 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5564 			bce_start_locked(ifp);
5565 	}
5566 
5567 bce_tick_locked_exit:
5568 	return;
5569 }
5570 
5571 
/*
 * Callout entry point for the periodic tick; acquires the driver
 * lock and defers to bce_tick_locked().
 */
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;

	BCE_LOCK(sc);
	bce_tick_locked(sc);
	BCE_UNLOCK(sc);
}
5583 
5584 
5585 #ifdef BCE_DEBUG
5586 /****************************************************************************/
5587 /* Allows the driver state to be dumped through the sysctl interface.       */
5588 /*                                                                          */
5589 /* Returns:                                                                 */
5590 /*   0 for success, positive value for failure.                             */
5591 /****************************************************************************/
5592 static int
5593 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5594 {
5595         int error;
5596         int result;
5597         struct bce_softc *sc;
5598 
5599         result = -1;
5600         error = sysctl_handle_int(oidp, &result, 0, req);
5601 
5602         if (error || !req->newptr)
5603                 return (error);
5604 
5605         if (result == 1) {
5606                 sc = (struct bce_softc *)arg1;
5607                 bce_dump_driver_state(sc);
5608         }
5609 
5610         return error;
5611 }
5612 
5613 
5614 /****************************************************************************/
5615 /* Allows the hardware state to be dumped through the sysctl interface.     */
5616 /*                                                                          */
5617 /* Returns:                                                                 */
5618 /*   0 for success, positive value for failure.                             */
5619 /****************************************************************************/
5620 static int
5621 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5622 {
5623         int error;
5624         int result;
5625         struct bce_softc *sc;
5626 
5627         result = -1;
5628         error = sysctl_handle_int(oidp, &result, 0, req);
5629 
5630         if (error || !req->newptr)
5631                 return (error);
5632 
5633         if (result == 1) {
5634                 sc = (struct bce_softc *)arg1;
5635                 bce_dump_hw_state(sc);
5636         }
5637 
5638         return error;
5639 }
5640 
5641 
5642 /****************************************************************************/
5643 /*                                                                          */
5644 /*                                                                          */
5645 /* Returns:                                                                 */
5646 /*   0 for success, positive value for failure.                             */
5647 /****************************************************************************/
5648 static int
5649 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5650 {
5651         int error;
5652         int result;
5653         struct bce_softc *sc;
5654 
5655         result = -1;
5656         error = sysctl_handle_int(oidp, &result, 0, req);
5657 
5658         if (error || !req->newptr)
5659                 return (error);
5660 
5661         if (result == 1) {
5662                 sc = (struct bce_softc *)arg1;
5663                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5664         }
5665 
5666         return error;
5667 }
5668 
5669 
5670 /****************************************************************************/
5671 /*                                                                          */
5672 /*                                                                          */
5673 /* Returns:                                                                 */
5674 /*   0 for success, positive value for failure.                             */
5675 /****************************************************************************/
5676 static int
5677 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5678 {
5679         int error;
5680         int result;
5681         struct bce_softc *sc;
5682 
5683         result = -1;
5684         error = sysctl_handle_int(oidp, &result, 0, req);
5685 
5686         if (error || !req->newptr)
5687                 return (error);
5688 
5689         if (result == 1) {
5690                 sc = (struct bce_softc *)arg1;
5691                 bce_breakpoint(sc);
5692         }
5693 
5694         return error;
5695 }
5696 #endif
5697 
5698 
5699 /****************************************************************************/
5700 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5701 /*                                                                          */
5702 /* Returns:                                                                 */
5703 /*   0 for success, positive value for failure.                             */
5704 /****************************************************************************/
static void
bce_add_sysctls(struct bce_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	/* All nodes hang off the device's own sysctl tree. */
	ctx = device_get_sysctl_ctx(sc->bce_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
		"driver_version",
		CTLFLAG_RD, &bce_driver_version,
		0, "bce driver version");

#ifdef BCE_DEBUG
	/* Debug-only counters maintained by the driver itself. */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"rx_low_watermark",
		CTLFLAG_RD, &sc->rx_low_watermark,
		0, "Lowest level of free rx_bd's");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"tx_hi_watermark",
		CTLFLAG_RD, &sc->tx_hi_watermark,
		0, "Highest level of used tx_bd's");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"l2fhdr_status_errors",
		CTLFLAG_RD, &sc->l2fhdr_status_errors,
		0, "l2_fhdr status errors");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"unexpected_attentions",
		CTLFLAG_RD, &sc->unexpected_attentions,
		0, "unexpected attentions");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"lost_status_block_updates",
		CTLFLAG_RD, &sc->lost_status_block_updates,
		0, "lost status block updates");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"mbuf_alloc_failed",
		CTLFLAG_RD, &sc->mbuf_alloc_failed,
		0, "mbuf cluster allocation failures");
#endif

	/*
	 * Hardware statistics mirrored by bce_stats_update().
	 * NOTE(review): these counters are exported via SYSCTL_ADD_ULONG;
	 * on 32-bit platforms u_long is 32 bits -- confirm the softc
	 * field widths match what is exported here.
	 * NOTE(review): the node name below uses lowercase "Hc" unlike
	 * every other stat_IfHC* node -- likely a typo, but renaming
	 * would change the user-visible sysctl name; confirm first.
	 */
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHcInOctets",
		CTLFLAG_RD, &sc->stat_IfHCInOctets,
		"Bytes received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInBadOctets",
		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
		"Bad bytes received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutOctets",
		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
		"Bytes sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutBadOctets",
		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
		"Bad bytes sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInUcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
		"Unicast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInMulticastPkts",
		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
		"Multicast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInBroadcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
		"Broadcast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutUcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
		"Unicast packets sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutMulticastPkts",
		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
		"Multicast packets sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutBroadcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
		"Broadcast packets sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
		0, "Internal MAC transmit errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsCarrierSenseErrors",
		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
		0, "Carrier sense errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsFCSErrors",
		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
		0, "Frame check sequence errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsAlignmentErrors",
		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
		0, "Alignment errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsSingleCollisionFrames",
		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
		0, "Single Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsMultipleCollisionFrames",
		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
		0, "Multiple Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsDeferredTransmissions",
		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
		0, "Deferred Transmissions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsExcessiveCollisions",
		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
		0, "Excessive Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsLateCollisions",
		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
		0, "Late Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsCollisions",
		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
		0, "Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsFragments",
		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
		0, "Fragments");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsJabbers",
		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
		0, "Jabbers");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsUndersizePkts",
		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
		0, "Undersize packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsOverrsizePkts",
		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
		0, "stat_EtherStatsOverrsizePkts");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx64Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
		0, "Bytes received in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx65Octetsto127Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
		0, "Bytes received in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx128Octetsto255Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
		0, "Bytes received in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx256Octetsto511Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
		0, "Bytes received in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx512Octetsto1023Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
		0, "Bytes received in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
		0, "Bytes received in 1024 t0 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
		0, "Bytes received in 1523 to 9022 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx64Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
		0, "Bytes sent in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx65Octetsto127Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
		0, "Bytes sent in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx128Octetsto255Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
		0, "Bytes sent in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx256Octetsto511Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
		0, "Bytes sent in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx512Octetsto1023Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
		0, "Bytes sent in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
		0, "Bytes sent in 1024 to 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
		0, "Bytes sent in 1523 to 9022 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_XonPauseFramesReceived",
		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
		0, "XON pause frames receved");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_XoffPauseFramesReceived",
		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
		0, "XOFF pause frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_OutXonSent",
		CTLFLAG_RD, &sc->stat_OutXonSent,
		0, "XON pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_OutXoffSent",
		CTLFLAG_RD, &sc->stat_OutXoffSent,
		0, "XOFF pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_FlowControlDone",
		CTLFLAG_RD, &sc->stat_FlowControlDone,
		0, "Flow control done");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_MacControlFramesReceived",
		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
		0, "MAC control frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_XoffStateEntered",
		CTLFLAG_RD, &sc->stat_XoffStateEntered,
		0, "XOFF state entered");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInFramesL2FilterDiscards",
		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
		0, "Received L2 packets discarded");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInRuleCheckerDiscards",
		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
		0, "Received packets discarded by rule");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInFTQDiscards",
		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
		0, "Received packet FTQ discards");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInMBUFDiscards",
		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
		0, "Received packets discarded due to lack of controller buffer memory");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInRuleCheckerP4Hit",
		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
		0, "Received packets rule checker hits");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInRuleCheckerDiscards",
		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
		0, "Received packets discarded in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInFTQDiscards",
		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
		0, "Received packets discarded in FTQ in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInMBUFDiscards",
		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
		0, "Received packets discarded in controller buffer memory in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInRuleCheckerP4Hit",
		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
		0, "Received packets rule checker hits in Catchup path");

#ifdef BCE_DEBUG
	/* Writable debug hooks; writing 1 triggers the associated dump. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_driver_state, "I", "Drive state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_hw_state, "I", "Hardware state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_breakpoint, "I", "Driver breakpoint");
#endif

}
6044 
6045 
6046 /****************************************************************************/
6047 /* BCE Debug Routines                                                       */
6048 /****************************************************************************/
6049 #ifdef BCE_DEBUG
6050 
6051 /****************************************************************************/
6052 /* Prints out information about an mbuf.                                    */
6053 /*                                                                          */
6054 /* Returns:                                                                 */
6055 /*   Nothing.                                                               */
6056 /****************************************************************************/
6057 static void
6058 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6059 {
6060 	u32 val_hi, val_lo;
6061 	struct mbuf *mp = m;
6062 
6063 	if (m == NULL) {
6064 		/* Index out of range. */
6065 		printf("mbuf ptr is null!\n");
6066 		return;
6067 	}
6068 
6069 	while (mp) {
6070 		val_hi = BCE_ADDR_HI(mp);
6071 		val_lo = BCE_ADDR_LO(mp);
6072 		BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
6073 			   val_hi, val_lo, mp->m_len);
6074 
6075 		if (mp->m_flags & M_EXT)
6076 			printf("M_EXT ");
6077 		if (mp->m_flags & M_PKTHDR)
6078 			printf("M_PKTHDR ");
6079 		printf("\n");
6080 
6081 		if (mp->m_flags & M_EXT) {
6082 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6083 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6084 			BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
6085 				val_hi, val_lo, mp->m_ext.ext_size);
6086 		}
6087 
6088 		mp = mp->m_next;
6089 	}
6090 
6091 
6092 }
6093 
6094 
6095 /****************************************************************************/
6096 /* Prints out the mbufs in the TX mbuf chain.                               */
6097 /*                                                                          */
6098 /* Returns:                                                                 */
6099 /*   Nothing.                                                               */
6100 /****************************************************************************/
6101 static void
6102 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6103 {
6104 	struct mbuf *m;
6105 
6106 	BCE_PRINTF(sc,
6107 		"----------------------------"
6108 		"  tx mbuf data  "
6109 		"----------------------------\n");
6110 
6111 	for (int i = 0; i < count; i++) {
6112 	 	m = sc->tx_mbuf_ptr[chain_prod];
6113 		BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6114 		bce_dump_mbuf(sc, m);
6115 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6116 	}
6117 
6118 	BCE_PRINTF(sc,
6119 		"----------------------------"
6120 		"----------------"
6121 		"----------------------------\n");
6122 }
6123 
6124 
6125 /*
6126  * This routine prints the RX mbuf chain.
6127  */
6128 static void
6129 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6130 {
6131 	struct mbuf *m;
6132 
6133 	BCE_PRINTF(sc,
6134 		"----------------------------"
6135 		"  rx mbuf data  "
6136 		"----------------------------\n");
6137 
6138 	for (int i = 0; i < count; i++) {
6139 	 	m = sc->rx_mbuf_ptr[chain_prod];
6140 		BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6141 		bce_dump_mbuf(sc, m);
6142 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6143 	}
6144 
6145 
6146 	BCE_PRINTF(sc,
6147 		"----------------------------"
6148 		"----------------"
6149 		"----------------------------\n");
6150 }
6151 
6152 
6153 static void
6154 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6155 {
6156 	if (idx > MAX_TX_BD)
6157 		/* Index out of range. */
6158 		BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6159 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6160 		/* TX Chain page pointer. */
6161 		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6162 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6163 	else
6164 		/* Normal tx_bd entry. */
6165 		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6166 			"vlan tag= 0x%4X, flags = 0x%04X\n", idx,
6167 			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6168 			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
6169 			txbd->tx_bd_flags);
6170 }
6171 
6172 
/****************************************************************************/
/* Prints out a single rx_bd structure.                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX Chain page pointer (last bd on a page links to the next). */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
6190 
6191 
/****************************************************************************/
/* Prints out a received frame header (l2_fhdr) structure.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	/* Frame status, packet length, VLAN tag, and the IP/TCP/UDP     */
	/* checksums computed by the controller for this received frame. */
	BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
		"tcp_udp_xsum = 0x%04X\n", idx,
		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
		l2fhdr->l2_fhdr_tcp_udp_xsum);
}
6202 
6203 
6204 /*
6205  * This routine prints the TX chain.
6206  */
6207 static void
6208 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6209 {
6210 	struct tx_bd *txbd;
6211 
6212 	/* First some info about the tx_bd chain structure. */
6213 	BCE_PRINTF(sc,
6214 		"----------------------------"
6215 		"  tx_bd  chain  "
6216 		"----------------------------\n");
6217 
6218 	BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6219 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6220 
6221 	BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6222 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6223 
6224 	BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6225 
6226 	BCE_PRINTF(sc, ""
6227 		"-----------------------------"
6228 		"   tx_bd data   "
6229 		"-----------------------------\n");
6230 
6231 	/* Now print out the tx_bd's themselves. */
6232 	for (int i = 0; i < count; i++) {
6233 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6234 		bce_dump_txbd(sc, tx_prod, txbd);
6235 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6236 	}
6237 
6238 	BCE_PRINTF(sc,
6239 		"-----------------------------"
6240 		"--------------"
6241 		"-----------------------------\n");
6242 }
6243 
6244 
6245 /*
6246  * This routine prints the RX chain.
6247  */
6248 static void
6249 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6250 {
6251 	struct rx_bd *rxbd;
6252 
6253 	/* First some info about the tx_bd chain structure. */
6254 	BCE_PRINTF(sc,
6255 		"----------------------------"
6256 		"  rx_bd  chain  "
6257 		"----------------------------\n");
6258 
6259 	BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6260 
6261 	BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6262 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6263 
6264 	BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6265 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6266 
6267 	BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6268 
6269 	BCE_PRINTF(sc,
6270 		"----------------------------"
6271 		"   rx_bd data   "
6272 		"----------------------------\n");
6273 
6274 	/* Now print out the rx_bd's themselves. */
6275 	for (int i = 0; i < count; i++) {
6276 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6277 		bce_dump_rxbd(sc, rx_prod, rxbd);
6278 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6279 	}
6280 
6281 	BCE_PRINTF(sc,
6282 		"----------------------------"
6283 		"--------------"
6284 		"----------------------------\n");
6285 }
6286 
6287 
/****************************************************************************/
/* Prints out the status block read back from the controller.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_status_block(struct bce_softc *sc)
{
	struct status_block *sblk;

	sblk = sc->status_block;

   	BCE_PRINTF(sc, "----------------------------- Status Block "
		"-----------------------------\n");

	BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
		sblk->status_attn_bits, sblk->status_attn_bits_ack,
		sblk->status_idx);

	BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
		sblk->status_rx_quick_consumer_index0,
		sblk->status_tx_quick_consumer_index0);

	/* NOTE(review): status_idx was already printed as "index" above; */
	/* this second print looks redundant -- confirm before removing.  */
	BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers, so only */
	/* print them when they hold non-zero values.                */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index1,
			sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index2,
			sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index3,
			sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index4,
			sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index6,
			sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index8,
			sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
			sblk->status_rx_quick_consumer_index10,
			sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
			sblk->status_rx_quick_consumer_index12,
			sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
			sblk->status_rx_quick_consumer_index14,
			sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
			sblk->status_completion_producer_index,
			sblk->status_cmd_consumer_index);

	BCE_PRINTF(sc, "-------------------------------------------"
		"-----------------------------\n");
}
6375 
6376 
/****************************************************************************/
/* Prints out the statistics block maintained by the controller.            */
/*                                                                          */
/* The 64 bit counters are printed as hi:lo 32 bit pairs; error and         */
/* discard counters are only printed when non-zero.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_stats_block(struct bce_softc *sc)
{
	struct statistics_block *sblk;

	sblk = sc->stats_block;

	BCE_PRINTF(sc, ""
		"-----------------------------"
		" Stats  Block "
		"-----------------------------\n");

	/* Always-printed 64 bit octet/packet counters. */
	BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
		"IfHcInBadOctets      = 0x%08X:%08X\n",
		sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
		sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
		"IfHcOutBadOctets     = 0x%08X:%08X\n",
		sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
		sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
		"IfHcInMulticastPkts  = 0x%08X:%08X\n",
		sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
		sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);

	BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
		"IfHcOutUcastPkts     = 0x%08X:%08X\n",
		sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
		sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);

	BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
		sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
		sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);

	/* The remaining counters are printed only when non-zero to keep */
	/* the debug output compact.                                     */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BCE_PRINTF(sc, "0x%08X : "
		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
			sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
			sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
			sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
			sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
			sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
			sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
			sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
			sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
			sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
			sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
			sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
			sblk->stat_EtherStatsUndersizePkts);

	/* "Overrsize" spelling follows the structure field name. */
	if (sblk->stat_EtherStatsOverrsizePkts)
		BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
			sblk->stat_EtherStatsOverrsizePkts);

	/* RX packet size histogram. */
	if (sblk->stat_EtherStatsPktsRx64Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
			sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	/* TX packet size histogram. */
	if (sblk->stat_EtherStatsPktsTx64Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
			sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	/* Flow control statistics. */
	if (sblk->stat_XonPauseFramesReceived)
		BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
			sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
	   BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
			sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
			sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
			sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
			sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
			sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
			sblk->stat_XoffStateEntered);

	/* Discard and rule-checker statistics. */
	if (sblk->stat_IfInFramesL2FilterDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
			sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
			sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
			sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
			sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
			sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
			sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
			sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
			sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
			sblk->stat_CatchupInRuleCheckerP4Hit);

	BCE_PRINTF(sc,
		"-----------------------------"
		"--------------"
		"-----------------------------\n");
}
6598 
6599 
6600 static void
6601 bce_dump_driver_state(struct bce_softc *sc)
6602 {
6603 	u32 val_hi, val_lo;
6604 
6605 	BCE_PRINTF(sc,
6606 		"-----------------------------"
6607 		" Driver State "
6608 		"-----------------------------\n");
6609 
6610 	val_hi = BCE_ADDR_HI(sc);
6611 	val_lo = BCE_ADDR_LO(sc);
6612 	BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6613 		val_hi, val_lo);
6614 
6615 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6616 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6617 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6618 		val_hi, val_lo);
6619 
6620 	val_hi = BCE_ADDR_HI(sc->status_block);
6621 	val_lo = BCE_ADDR_LO(sc->status_block);
6622 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6623 		val_hi, val_lo);
6624 
6625 	val_hi = BCE_ADDR_HI(sc->stats_block);
6626 	val_lo = BCE_ADDR_LO(sc->stats_block);
6627 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6628 		val_hi, val_lo);
6629 
6630 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6631 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6632 	BCE_PRINTF(sc,
6633 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
6634 		val_hi, val_lo);
6635 
6636 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6637 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6638 	BCE_PRINTF(sc,
6639 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6640 		val_hi, val_lo);
6641 
6642 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6643 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6644 	BCE_PRINTF(sc,
6645 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6646 		val_hi, val_lo);
6647 
6648 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6649 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6650 	BCE_PRINTF(sc,
6651 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6652 		val_hi, val_lo);
6653 
6654 	BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6655 		sc->interrupts_generated);
6656 
6657 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6658 		sc->rx_interrupts);
6659 
6660 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6661 		sc->tx_interrupts);
6662 
6663 	BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6664 		sc->last_status_idx);
6665 
6666 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6667 		sc->tx_prod);
6668 
6669 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6670 		sc->tx_cons);
6671 
6672 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6673 		sc->tx_prod_bseq);
6674 
6675 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6676 		sc->rx_prod);
6677 
6678 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6679 		sc->rx_cons);
6680 
6681 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6682 		sc->rx_prod_bseq);
6683 
6684 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6685 		sc->rx_mbuf_alloc);
6686 
6687 	BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6688 		sc->free_rx_bd);
6689 
6690 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6691 		sc->rx_low_watermark, (u32) USABLE_RX_BD);
6692 
6693 	BCE_PRINTF(sc, "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
6694 		sc->tx_mbuf_alloc);
6695 
6696 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6697 		sc->rx_mbuf_alloc);
6698 
6699 	BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6700 		sc->used_tx_bd);
6701 
6702 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6703 		sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6704 
6705 	BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6706 		sc->mbuf_alloc_failed);
6707 
6708 	BCE_PRINTF(sc,
6709 		"-----------------------------"
6710 		"--------------"
6711 		"-----------------------------\n");
6712 }
6713 
6714 
6715 static void
6716 bce_dump_hw_state(struct bce_softc *sc)
6717 {
6718 	u32 val1;
6719 
6720 	BCE_PRINTF(sc,
6721 		"----------------------------"
6722 		" Hardware State "
6723 		"----------------------------\n");
6724 
6725 	BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6726 
6727 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6728 	BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6729 		val1, BCE_MISC_ENABLE_STATUS_BITS);
6730 
6731 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6732 	BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6733 
6734 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6735 	BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6736 
6737 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6738 	BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6739 
6740 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6741 	BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6742 
6743 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6744 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6745 
6746 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6747 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6748 
6749 	val1 = REG_RD(sc, BCE_HC_STATUS);
6750 	BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6751 
6752 	BCE_PRINTF(sc,
6753 		"----------------------------"
6754 		"----------------"
6755 		"----------------------------\n");
6756 
6757 	BCE_PRINTF(sc,
6758 		"----------------------------"
6759 		" Register  Dump "
6760 		"----------------------------\n");
6761 
6762 	for (int i = 0x400; i < 0x8000; i += 0x10)
6763 		BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6764 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6765 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6766 
6767 	BCE_PRINTF(sc,
6768 		"----------------------------"
6769 		"----------------"
6770 		"----------------------------\n");
6771 }
6772 
6773 
/****************************************************************************/
/* Dumps driver state and drops into the kernel debugger.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_breakpoint(struct bce_softc *sc)
{

	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
   		bce_dump_txbd(sc, 0, NULL);
		bce_dump_rxbd(sc, 0, NULL);
		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
		bce_dump_l2fhdr(sc, 0, NULL);
		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
		bce_dump_status_block(sc);
		bce_dump_stats_block(sc);
		bce_dump_driver_state(sc);
		bce_dump_hw_state(sc);
	}

	/* Print the driver state summary. */
	bce_dump_driver_state(sc);
	/* Print the important status block fields. */
	bce_dump_status_block(sc);

	/* Call the debugger. */
	breakpoint();

	return;
}
6802 #endif
6803