xref: /freebsd/sys/dev/bce/if_bce.c (revision ebfbcb8bec539ff98a9a0cf36397776540ea59d0)
1 /*-
2  * Copyright (c) 2006 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1
38  *
39  * The following controllers are not supported by this driver:
40  * (These are not "Production" versions of the controller.)
41  *
42  *   BCM5706C A0, A1
43  *   BCM5706S A0, A1, A2, A3
44  *   BCM5708C A0, B0
45  *   BCM5708S A0, B0, B1
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Driver Version                                                       */
55 /****************************************************************************/
/* Driver version string; reported in the device description built by bce_probe(). */
char bce_driver_version[] = "v0.9.6";
57 
58 
59 /****************************************************************************/
60 /* BCE Debug Options                                                        */
61 /****************************************************************************/
#ifdef BCE_DEBUG
	u32 bce_debug = BCE_WARN;

	/*
	 * Probability scale for the fault-injection knobs below.
	 * NOTE(review): presumably compared against a 31-bit random
	 * value by the debug macros that consume them -- confirm.
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 = 1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bce_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bce_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bce_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bce_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bce_debug_bootcode_running_failure = 0;
#endif
90 
91 
92 /****************************************************************************/
93 /* PCI Device ID Table                                                      */
94 /*                                                                          */
95 /* Used by bce_probe() to identify the devices supported by this driver.    */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX		64
98 
99 static struct bce_type bce_devs[] = {
100 	/* BCM5706C Controllers and OEM boards. */
101 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
102 		"HP NC370T Multifunction Gigabit Server Adapter" },
103 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
104 		"HP NC370i Multifunction Gigabit Server Adapter" },
105 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
106 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
107 
108 	/* BCM5706S controllers and OEM boards. */
109 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
110 		"HP NC370F Multifunction Gigabit Server Adapter" },
111 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
112 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 
114 	/* BCM5708C controllers and OEM boards. */
115 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
116 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
117 
118 	/* BCM5708S controllers and OEM boards. */
119 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
120 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
121 	{ 0, 0, 0, 0, NULL }
122 };
123 
124 
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data.                                       */
127 /****************************************************************************/
static struct flash_spec flash_table[] =
{
	/*
	 * Per-entry layout (inferred from initializer order --
	 * NOTE(review): confirm against struct flash_spec in
	 * if_bcereg.h): five NVRAM strapping/controller config words,
	 * buffered-flash flag, page bits, page size, byte address mask,
	 * total device size (0 for unpopulated expansion entries), and
	 * a printable device name.
	 */
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
214 
215 
216 /****************************************************************************/
217 /* FreeBSD device entry points.                                             */
218 /****************************************************************************/
219 static int  bce_probe				(device_t);
220 static int  bce_attach				(device_t);
221 static int  bce_detach				(device_t);
222 static void bce_shutdown			(device_t);
223 
224 
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines                                   */
227 /****************************************************************************/
228 #ifdef BCE_DEBUG
229 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, int, int);
232 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain		(struct bce_softc *, int, int);
236 static void bce_dump_rx_chain		(struct bce_softc *, int, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_breakpoint			(struct bce_softc *);
242 #endif
243 
244 
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines                                      */
247 /****************************************************************************/
248 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
249 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
250 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
251 static int  bce_miibus_read_reg		(device_t, int, int);
252 static int  bce_miibus_write_reg	(device_t, int, int, int);
253 static void bce_miibus_statchg		(device_t);
254 
255 
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines                                                */
258 /****************************************************************************/
259 static int  bce_acquire_nvram_lock	(struct bce_softc *);
260 static int  bce_release_nvram_lock	(struct bce_softc *);
261 static void bce_enable_nvram_access	(struct bce_softc *);
262 static void	bce_disable_nvram_access(struct bce_softc *);
263 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
264 static int  bce_init_nvram			(struct bce_softc *);
265 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
266 static int  bce_nvram_test			(struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int  bce_enable_nvram_write	(struct bce_softc *);
269 static void bce_disable_nvram_write	(struct bce_softc *);
270 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
271 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
272 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
273 #endif
274 
275 /****************************************************************************/
276 /*                                                                          */
277 /****************************************************************************/
278 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
279 static void bce_dma_map_tx_desc		(void *, bus_dma_segment_t *, int, bus_size_t, int);
280 static int  bce_dma_alloc			(device_t);
281 static void bce_dma_free			(struct bce_softc *);
282 static void bce_release_resources	(struct bce_softc *);
283 
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync				(struct bce_softc *, u32);
288 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus			(struct bce_softc *);
291 
292 static void bce_stop				(struct bce_softc *);
293 static int  bce_reset				(struct bce_softc *, u32);
294 static int  bce_chipinit 			(struct bce_softc *);
295 static int  bce_blockinit 			(struct bce_softc *);
296 static int  bce_get_buf				(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297 
298 static int  bce_init_tx_chain		(struct bce_softc *);
299 static int  bce_init_rx_chain		(struct bce_softc *);
300 static void bce_free_rx_chain		(struct bce_softc *);
301 static void bce_free_tx_chain		(struct bce_softc *);
302 
303 static int  bce_tx_encap			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
304 static void bce_start_locked		(struct ifnet *);
305 static void bce_start				(struct ifnet *);
306 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
307 static void bce_watchdog			(struct ifnet *);
308 static int  bce_ifmedia_upd			(struct ifnet *);
309 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
310 static void bce_init_locked			(struct bce_softc *);
311 static void bce_init				(void *);
312 static void bce_mgmt_init_locked(struct bce_softc *sc);
313 
314 static void bce_init_context		(struct bce_softc *);
315 static void bce_get_mac_addr		(struct bce_softc *);
316 static void bce_set_mac_addr		(struct bce_softc *);
317 static void bce_phy_intr			(struct bce_softc *);
318 static void bce_rx_intr				(struct bce_softc *);
319 static void bce_tx_intr				(struct bce_softc *);
320 static void bce_disable_intr		(struct bce_softc *);
321 static void bce_enable_intr			(struct bce_softc *);
322 
323 #ifdef DEVICE_POLLING
324 static void bce_poll_locked			(struct ifnet *, enum poll_cmd, int);
325 static void bce_poll				(struct ifnet *, enum poll_cmd, int);
326 #endif
327 static void bce_intr				(void *);
328 static void bce_set_rx_mode			(struct bce_softc *);
329 static void bce_stats_update		(struct bce_softc *);
330 static void bce_tick_locked			(struct bce_softc *);
331 static void bce_tick				(void *);
332 static void bce_add_sysctls			(struct bce_softc *);
333 
334 
335 /****************************************************************************/
336 /* FreeBSD device dispatch table.                                           */
337 /****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	/* Terminate the method list. */
	{ 0, 0 }
};

static driver_t bce_driver = {
	"bce",				/* driver name */
	bce_methods,			/* device method table */
	sizeof(struct bce_softc)	/* size of the per-instance softc */
};

static devclass_t bce_devclass;

/* Module dependencies: PCI bus, Ethernet framework, and the MII PHY layer. */
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

/* Register bce on the PCI bus, with miibus attached as its child. */
DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
371 
372 
373 /****************************************************************************/
374 /* Device probe function.                                                   */
375 /*                                                                          */
376 /* Compares the device to the driver's list of supported devices and        */
377 /* reports back to the OS whether this is the right driver for the device.  */
378 /*                                                                          */
379 /* Returns:                                                                 */
380 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
381 /****************************************************************************/
382 static int
383 bce_probe(device_t dev)
384 {
385 	struct bce_type *t;
386 	struct bce_softc *sc;
387 	char *descbuf;
388 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
389 
390 	t = bce_devs;
391 
392 	sc = device_get_softc(dev);
393 	bzero(sc, sizeof(struct bce_softc));
394 	sc->bce_unit = device_get_unit(dev);
395 	sc->bce_dev = dev;
396 
397 	/* Get the data for the device to be probed. */
398 	vid  = pci_get_vendor(dev);
399 	did  = pci_get_device(dev);
400 	svid = pci_get_subvendor(dev);
401 	sdid = pci_get_subdevice(dev);
402 
403 	DBPRINT(sc, BCE_VERBOSE_LOAD,
404 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
405 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
406 
407 	/* Look through the list of known devices for a match. */
408 	while(t->bce_name != NULL) {
409 
410 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
411 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
412 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
413 
414 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
415 
416 			if (descbuf == NULL)
417 				return(ENOMEM);
418 
419 			/* Print out the device identity. */
420 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s",
421 				t->bce_name,
422 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
423 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
424 			    bce_driver_version);
425 
426 			device_set_desc_copy(dev, descbuf);
427 			free(descbuf, M_TEMP);
428 			return(BUS_PROBE_DEFAULT);
429 		}
430 		t++;
431 	}
432 
433 	DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No IOCTL match found!\n",
434 		__FILE__, __LINE__);
435 
436 	return(ENXIO);
437 }
438 
439 
440 /****************************************************************************/
441 /* Device attach function.                                                  */
442 /*                                                                          */
443 /* Allocates device resources, performs secondary chip identification,      */
444 /* resets and initializes the hardware, and initializes driver instance     */
445 /* variables.                                                               */
446 /*                                                                          */
447 /* Returns:                                                                 */
448 /*   0 on success, positive value on failure.                               */
449 /****************************************************************************/
450 static int
451 bce_attach(device_t dev)
452 {
453 	struct bce_softc *sc;
454 	struct ifnet *ifp;
455 	u32 val;
456 	int mbuf, rid, rc = 0;
457 
458 	sc = device_get_softc(dev);
459 	sc->bce_dev = dev;
460 
461 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
462 
463 	mbuf = device_get_unit(dev);
464 	sc->bce_unit = mbuf;
465 
466 	pci_enable_busmaster(dev);
467 
468 	/* Allocate PCI memory resources. */
469 	rid = PCIR_BAR(0);
470 	sc->bce_res = bus_alloc_resource_any(
471 		dev, 							/* dev */
472 		SYS_RES_MEMORY, 				/* type */
473 		&rid,							/* rid */
474 	    RF_ACTIVE | PCI_RF_DENSE);		/* flags */
475 
476 	if (sc->bce_res == NULL) {
477 		BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n",
478 			__FILE__, __LINE__);
479 		rc = ENXIO;
480 		goto bce_attach_fail;
481 	}
482 
483 	/* Get various resource handles. */
484 	sc->bce_btag    = rman_get_bustag(sc->bce_res);
485 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
486 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
487 
488 	/* Allocate PCI IRQ resources. */
489 	rid = 0;
490 	sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
491 	    RF_SHAREABLE | RF_ACTIVE);
492 
493 	if (sc->bce_irq == NULL) {
494 		BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n",
495 			__FILE__, __LINE__);
496 		rc = ENXIO;
497 		goto bce_attach_fail;
498 	}
499 
500 	/* Initialize mutex for the current device instance. */
501 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
502 
503 	/*
504 	 * Configure byte swap and enable indirect register access.
505 	 * Rely on CPU to do target byte swapping on big endian systems.
506 	 * Access to registers outside of PCI configurtion space are not
507 	 * valid until this is done.
508 	 */
509 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
510 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
511 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
512 
513 	/* Save ASIC revsion info. */
514 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
515 
516 	/* Weed out any non-production controller revisions. */
517 	switch(BCE_CHIP_ID(sc)) {
518 		case BCE_CHIP_ID_5706_A0:
519 		case BCE_CHIP_ID_5706_A1:
520 		case BCE_CHIP_ID_5708_A0:
521 		case BCE_CHIP_ID_5708_B0:
522 			BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
523 				__FILE__, __LINE__,
524 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
525 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
526 			rc = ENODEV;
527 			goto bce_attach_fail;
528 	}
529 
530 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
531 		BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
532 			__FILE__, __LINE__);
533 		rc = ENODEV;
534 		goto bce_attach_fail;
535 	}
536 
537 	/*
538 	 * The embedded PCIe to PCI-X bridge (EPB)
539 	 * in the 5708 cannot address memory above
540 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
541 	 */
542 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
543 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
544 	else
545 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
546 
547 	/*
548 	 * Find the base address for shared memory access.
549 	 * Newer versions of bootcode use a signature and offset
550 	 * while older versions use a fixed address.
551 	 */
552 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
553 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
554 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
555 	else
556 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
557 
558 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
559 
560 	/* Set initial device and PHY flags */
561 	sc->bce_flags = 0;
562 	sc->bce_phy_flags = 0;
563 
564 	/* Get PCI bus information (speed and type). */
565 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
566 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
567 		u32 clkreg;
568 
569 		sc->bce_flags |= BCE_PCIX_FLAG;
570 
571 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
572 
573 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
574 		switch (clkreg) {
575 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
576 			sc->bus_speed_mhz = 133;
577 			break;
578 
579 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
580 			sc->bus_speed_mhz = 100;
581 			break;
582 
583 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
584 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
585 			sc->bus_speed_mhz = 66;
586 			break;
587 
588 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
589 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
590 			sc->bus_speed_mhz = 50;
591 			break;
592 
593 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
594 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
595 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
596 			sc->bus_speed_mhz = 33;
597 			break;
598 		}
599 	} else {
600 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
601 			sc->bus_speed_mhz = 66;
602 		else
603 			sc->bus_speed_mhz = 33;
604 	}
605 
606 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
607 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
608 
609 	BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
610 		sc->bce_chipid,
611 		((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
612 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
613 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
614 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
615 		sc->bus_speed_mhz);
616 
617 	/* Reset the controller. */
618 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
619 		rc = ENXIO;
620 		goto bce_attach_fail;
621 	}
622 
623 	/* Initialize the controller. */
624 	if (bce_chipinit(sc)) {
625 		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
626 			__FILE__, __LINE__);
627 		rc = ENXIO;
628 		goto bce_attach_fail;
629 	}
630 
631 	/* Perform NVRAM test. */
632 	if (bce_nvram_test(sc)) {
633 		BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
634 			__FILE__, __LINE__);
635 		rc = ENXIO;
636 		goto bce_attach_fail;
637 	}
638 
639 	/* Fetch the permanent Ethernet MAC address. */
640 	bce_get_mac_addr(sc);
641 
642 	/*
643 	 * Trip points control how many BDs
644 	 * should be ready before generating an
645 	 * interrupt while ticks control how long
646 	 * a BD can sit in the chain before
647 	 * generating an interrupt.  Set the default
648 	 * values for the RX and TX rings.
649 	 */
650 
651 #ifdef BCE_DRBUG
652 	/* Force more frequent interrupts. */
653 	sc->bce_tx_quick_cons_trip_int = 1;
654 	sc->bce_tx_quick_cons_trip     = 1;
655 	sc->bce_tx_ticks_int           = 0;
656 	sc->bce_tx_ticks               = 0;
657 
658 	sc->bce_rx_quick_cons_trip_int = 1;
659 	sc->bce_rx_quick_cons_trip     = 1;
660 	sc->bce_rx_ticks_int           = 0;
661 	sc->bce_rx_ticks               = 0;
662 #else
663 	sc->bce_tx_quick_cons_trip_int = 20;
664 	sc->bce_tx_quick_cons_trip     = 20;
665 	sc->bce_tx_ticks_int           = 80;
666 	sc->bce_tx_ticks               = 80;
667 
668 	sc->bce_rx_quick_cons_trip_int = 6;
669 	sc->bce_rx_quick_cons_trip     = 6;
670 	sc->bce_rx_ticks_int           = 18;
671 	sc->bce_rx_ticks               = 18;
672 #endif
673 
674 	/* Update statistics once every second. */
675 	sc->bce_stats_ticks = 1000000 & 0xffff00;
676 
677 	/*
678 	 * The copper based NetXtreme II controllers
679 	 * use an integrated PHY at address 1 while
680 	 * the SerDes controllers use a PHY at
681 	 * address 2.
682 	 */
683 	sc->bce_phy_addr = 1;
684 
685 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
686 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
687 		sc->bce_flags |= BCE_NO_WOL_FLAG;
688 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
689 			sc->bce_phy_addr = 2;
690 			val = REG_RD_IND(sc, sc->bce_shmem_base +
691 					 BCE_SHARED_HW_CFG_CONFIG);
692 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
693 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
694 		}
695 	}
696 
697 	/* Allocate DMA memory resources. */
698 	if (bce_dma_alloc(dev)) {
699 		BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
700 		    __FILE__, __LINE__);
701 		rc = ENXIO;
702 		goto bce_attach_fail;
703 	}
704 
705 	/* Allocate an ifnet structure. */
706 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
707 	if (ifp == NULL) {
708 		BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n",
709 			__FILE__, __LINE__);
710 		rc = ENXIO;
711 		goto bce_attach_fail;
712 	}
713 
714 	/* Initialize the ifnet interface. */
715 	ifp->if_softc        = sc;
716 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
717 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
718 	ifp->if_ioctl        = bce_ioctl;
719 	ifp->if_start        = bce_start;
720 	ifp->if_timer        = 0;
721 	ifp->if_watchdog     = bce_watchdog;
722 	ifp->if_init         = bce_init;
723 	ifp->if_mtu          = ETHERMTU;
724 	ifp->if_hwassist     = BCE_IF_HWASSIST;
725 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
726 	ifp->if_capenable    = ifp->if_capabilities;
727 
728 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
729 	sc->mbuf_alloc_size  = MCLBYTES;
730 #ifdef DEVICE_POLLING
731 	ifp->if_capabilities |= IFCAP_POLLING;
732 #endif
733 
734 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
735 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
736 		ifp->if_baudrate = IF_Gbps(2.5);
737 	else
738 		ifp->if_baudrate = IF_Gbps(1);
739 
740 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
741 	IFQ_SET_READY(&ifp->if_snd);
742 
743 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
744 		BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n",
745 			__FILE__, __LINE__);
746 		rc = ENODEV;
747 		goto bce_attach_fail;
748 	} else {
749 		/* Look for our PHY. */
750 		if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
751 			bce_ifmedia_sts)) {
752 			BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n",
753 				__FILE__, __LINE__);
754 			rc = ENXIO;
755 			goto bce_attach_fail;
756 		}
757 	}
758 
759 	/* Attach to the Ethernet interface list. */
760 	ether_ifattach(ifp, sc->eaddr);
761 
762 #if __FreeBSD_version < 500000
763 	callout_init(&sc->bce_stat_ch);
764 #else
765 	callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
766 #endif
767 
768 	/* Hookup IRQ last. */
769 	rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
770 	   bce_intr, sc, &sc->bce_intrhand);
771 
772 	if (rc) {
773 		BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n",
774 			__FILE__, __LINE__);
775 		bce_detach(dev);
776 		goto bce_attach_exit;
777 	}
778 
779 	/* Print some important debugging info. */
780 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
781 
782 	/* Add the supported sysctls to the kernel. */
783 	bce_add_sysctls(sc);
784 
785 	/* Get the firmware running so IPMI still works */
786 	BCE_LOCK(sc);
787 	bce_mgmt_init_locked(sc);
788 	BCE_UNLOCK(sc);
789 
790 	goto bce_attach_exit;
791 
792 bce_attach_fail:
793 	bce_release_resources(sc);
794 
795 bce_attach_exit:
796 
797 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
798 
799 	return(rc);
800 }
801 
802 
803 /****************************************************************************/
804 /* Device detach function.                                                  */
805 /*                                                                          */
806 /* Stops the controller, resets the controller, and releases resources.     */
807 /*                                                                          */
808 /* Returns:                                                                 */
809 /*   0 on success, positive value on failure.                               */
810 /****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

#ifdef DEVICE_POLLING
	/* Polling must be deregistered before the interface goes away. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Stop and reset the controller. */
	BCE_LOCK(sc);
	bce_stop(sc);
	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	BCE_UNLOCK(sc);

	/* Detach from the network stack (must follow hardware stop). */
	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		/* SerDes media was managed directly; no miibus child exists. */
		ifmedia_removeall(&sc->bce_ifmedia);
	} else {
		/* Copper PHY: tear down the miibus child device. */
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bce_miibus);
	}

	/* Release all remaining resources. */
	bce_release_resources(sc);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
851 
852 
853 /****************************************************************************/
854 /* Device shutdown function.                                                */
855 /*                                                                          */
856 /* Stops and resets the controller.                                         */
857 /*                                                                          */
858 /* Returns:                                                                 */
859 /*   Nothing                                                                */
860 /****************************************************************************/
861 static void
862 bce_shutdown(device_t dev)
863 {
864 	struct bce_softc *sc = device_get_softc(dev);
865 
866 	BCE_LOCK(sc);
867 	bce_stop(sc);
868 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
869 	BCE_UNLOCK(sc);
870 }
871 
872 
873 /****************************************************************************/
874 /* Indirect register read.                                                  */
875 /*                                                                          */
876 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
877 /* configuration space.  Using this mechanism avoids issues with posted     */
878 /* reads but is much slower than memory-mapped I/O.                         */
879 /*                                                                          */
880 /* Returns:                                                                 */
881 /*   The value of the register.                                             */
882 /****************************************************************************/
static u32
bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
{
	device_t dev;
	dev = sc->bce_dev;

	/* Point the PCI config-space register window at the target offset. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
#ifdef BCE_DEBUG
	{
		u32 val;
		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
			__FUNCTION__, offset, val);
		return val;
	}
#else
	/* Read the register contents back through the window. */
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
#endif
}
902 
903 
904 /****************************************************************************/
905 /* Indirect register write.                                                 */
906 /*                                                                          */
907 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
908 /* configuration space.  Using this mechanism avoids issues with posted     */
909 /* writes but is muchh slower than memory-mapped I/O.                       */
910 /*                                                                          */
911 /* Returns:                                                                 */
912 /*   Nothing.                                                               */
913 /****************************************************************************/
914 static void
915 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
916 {
917 	device_t dev;
918 	dev = sc->bce_dev;
919 
920 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
921 		__FUNCTION__, offset, val);
922 
923 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
924 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
925 }
926 
927 
928 /****************************************************************************/
929 /* Context memory write.                                                    */
930 /*                                                                          */
931 /* The NetXtreme II controller uses context memory to track connection      */
932 /* information for L2 and higher network protocols.                         */
933 /*                                                                          */
934 /* Returns:                                                                 */
935 /*   Nothing.                                                               */
936 /****************************************************************************/
937 static void
938 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
939 {
940 
941 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
942 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
943 
944 	offset += cid_addr;
945 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
946 	REG_WR(sc, BCE_CTX_DATA, val);
947 }
948 
949 
950 /****************************************************************************/
951 /* PHY register read.                                                       */
952 /*                                                                          */
953 /* Implements register reads on the MII bus.                                */
954 /*                                                                          */
955 /* Returns:                                                                 */
956 /*   The value of the register.                                             */
957 /****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/* Temporarily disable hardware auto-polling so we own the MDIO bus. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back to flush the posted write before waiting. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and issue the MDIO read command. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the BUSY bit clears, then fetch the data bits. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* BUSY still set means the loop above timed out. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Return only the 16-bit register value (0 on timeout). */
	return (val & 0xffff);

}
1026 
1027 
1028 /****************************************************************************/
1029 /* PHY register write.                                                      */
1030 /*                                                                          */
1031 /* Implements register writes on the MII bus.                               */
1032 /*                                                                          */
1033 /* Returns:                                                                 */
/*   0 (errors are logged but not reported to the caller).                  */
1035 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Temporarily disable hardware auto-polling so we own the MDIO bus. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and issue the MDIO write command. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the BUSY bit clears, indicating the write completed. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is logged but not reported to the caller. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Always returns 0; errors are only logged. */
	return 0;
}
1095 
1096 
1097 /****************************************************************************/
1098 /* MII bus status change.                                                   */
1099 /*                                                                          */
1100 /* Called by the MII bus driver when the PHY establishes link to set the    */
1101 /* MAC interface registers.                                                 */
1102 /*                                                                          */
1103 /* Returns:                                                                 */
1104 /*   Nothing.                                                               */
1105 /****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Clear the port mode bits before selecting the new mode. */
	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
1136 
1137 
1138 /****************************************************************************/
1139 /* Acquire NVRAM lock.                                                      */
1140 /*                                                                          */
1141 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
/* for use by the driver.                                                   */
1144 /*                                                                          */
1145 /* Returns:                                                                 */
1146 /*   0 on success, positive value on failure.                               */
1147 /****************************************************************************/
1148 static int
1149 bce_acquire_nvram_lock(struct bce_softc *sc)
1150 {
1151 	u32 val;
1152 	int j;
1153 
1154 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1155 
1156 	/* Request access to the flash interface. */
1157 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1158 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1159 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1160 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1161 			break;
1162 
1163 		DELAY(5);
1164 	}
1165 
1166 	if (j >= NVRAM_TIMEOUT_COUNT) {
1167 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1168 		return EBUSY;
1169 	}
1170 
1171 	return 0;
1172 }
1173 
1174 
1175 /****************************************************************************/
1176 /* Release NVRAM lock.                                                      */
1177 /*                                                                          */
1178 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
/* for use by the driver.                                                   */
1181 /*                                                                          */
1182 /* Returns:                                                                 */
1183 /*   0 on success, positive value on failure.                               */
1184 /****************************************************************************/
1185 static int
1186 bce_release_nvram_lock(struct bce_softc *sc)
1187 {
1188 	int j;
1189 	u32 val;
1190 
1191 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1192 
1193 	/*
1194 	 * Relinquish nvram interface.
1195 	 */
1196 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1197 
1198 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1199 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1200 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1201 			break;
1202 
1203 		DELAY(5);
1204 	}
1205 
1206 	if (j >= NVRAM_TIMEOUT_COUNT) {
1207 		DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1208 		return EBUSY;
1209 	}
1210 
1211 	return 0;
1212 }
1213 
1214 
1215 #ifdef BCE_NVRAM_WRITE_SUPPORT
1216 /****************************************************************************/
1217 /* Enable NVRAM write access.                                               */
1218 /*                                                                          */
1219 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1220 /*                                                                          */
1221 /* Returns:                                                                 */
1222 /*   0 on success, positive value on failure.                               */
1223 /****************************************************************************/
static int
bce_enable_nvram_write(struct bce_softc *sc)
{
	u32 val;

	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");

	/* Set the PCI-side NVRAM write enable bit. */
	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash also needs a WREN command issued to the part. */
	if (!sc->bce_flash_info->buffered) {
		int j;

		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);

		/* Wait for the command to complete. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BCE_NVM_COMMAND);
			if (val & BCE_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}
	return 0;
}
1255 
1256 
1257 /****************************************************************************/
1258 /* Disable NVRAM write access.                                              */
1259 /*                                                                          */
1260 /* When the caller is finished writing to NVRAM write access must be        */
1261 /* disabled.                                                                */
1262 /*                                                                          */
1263 /* Returns:                                                                 */
1264 /*   Nothing.                                                               */
1265 /****************************************************************************/
1266 static void
1267 bce_disable_nvram_write(struct bce_softc *sc)
1268 {
1269 	u32 val;
1270 
1271 	DBPRINT(sc, BCE_VERBOSE,  "Disabling NVRAM write.\n");
1272 
1273 	val = REG_RD(sc, BCE_MISC_CFG);
1274 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1275 }
1276 #endif
1277 
1278 
1279 /****************************************************************************/
1280 /* Enable NVRAM access.                                                     */
1281 /*                                                                          */
1282 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1284 /*                                                                          */
1285 /* Returns:                                                                 */
1286 /*   Nothing.                                                               */
1287 /****************************************************************************/
1288 static void
1289 bce_enable_nvram_access(struct bce_softc *sc)
1290 {
1291 	u32 val;
1292 
1293 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1294 
1295 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1296 	/* Enable both bits, even on read. */
1297 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1298 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1299 }
1300 
1301 
1302 /****************************************************************************/
1303 /* Disable NVRAM access.                                                    */
1304 /*                                                                          */
1305 /* When the caller is finished accessing NVRAM access must be disabled.     */
1306 /*                                                                          */
1307 /* Returns:                                                                 */
1308 /*   Nothing.                                                               */
1309 /****************************************************************************/
1310 static void
1311 bce_disable_nvram_access(struct bce_softc *sc)
1312 {
1313 	u32 val;
1314 
1315 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1316 
1317 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1318 
1319 	/* Disable both bits, even after read. */
1320 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1321 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1322 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1323 }
1324 
1325 
1326 #ifdef BCE_NVRAM_WRITE_SUPPORT
1327 /****************************************************************************/
1328 /* Erase NVRAM page before writing.                                         */
1329 /*                                                                          */
1330 /* Non-buffered flash parts require that a page be erased before it is      */
1331 /* written.                                                                 */
1332 /*                                                                          */
1333 /* Returns:                                                                 */
1334 /*   0 on success, positive value on failure.                               */
1335 /****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	      BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
1378 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1379 
1380 
1381 /****************************************************************************/
1382 /* Read a dword (32 bits) from NVRAM.                                       */
1383 /*                                                                          */
1384 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1385 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1386 /*                                                                          */
1387 /* Returns:                                                                 */
1388 /*   0 on success and the 32 bit value read, positive value on failure.     */
1389 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
							u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			   sc->bce_flash_info->page_bits) +
			  (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* Convert from big-endian and copy to the caller's buffer. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1440 
1441 
1442 #ifdef BCE_NVRAM_WRITE_SUPPORT
1443 /****************************************************************************/
1444 /* Write a dword (32 bits) to NVRAM.                                        */
1445 /*                                                                          */
1446 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1447 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1448 /* enabled NVRAM write access.                                              */
1449 /*                                                                          */
1450 /* Returns:                                                                 */
1451 /*   0 on success, positive value on failure.                               */
1452 /****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
	u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
1496 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1497 
1498 
1499 /****************************************************************************/
1500 /* Initialize NVRAM access.                                                 */
1501 /*                                                                          */
1502 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1503 /* access that device.                                                      */
1504 /*                                                                          */
1505 /* Returns:                                                                 */
1506 /*   0 on success, positive value on failure.                               */
1507 /****************************************************************************/
1508 static int
1509 bce_init_nvram(struct bce_softc *sc)
1510 {
1511 	u32 val;
1512 	int j, entry_count, rc;
1513 	struct flash_spec *flash;
1514 
1515 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1516 
1517 	/* Determine the selected interface. */
1518 	val = REG_RD(sc, BCE_NVM_CFG1);
1519 
1520 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1521 
1522 	rc = 0;
1523 
1524 	/*
1525 	 * Flash reconfiguration is required to support additional
1526 	 * NVRAM devices not directly supported in hardware.
1527 	 * Check if the flash interface was reconfigured
1528 	 * by the bootcode.
1529 	 */
1530 
1531 	if (val & 0x40000000) {
1532 		/* Flash interface reconfigured by bootcode. */
1533 
1534 		DBPRINT(sc,BCE_INFO_LOAD,
1535 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1536 
1537 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1538 		     j++, flash++) {
1539 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1540 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1541 				sc->bce_flash_info = flash;
1542 				break;
1543 			}
1544 		}
1545 	} else {
1546 		/* Flash interface not yet reconfigured. */
1547 		u32 mask;
1548 
1549 		DBPRINT(sc,BCE_INFO_LOAD,
1550 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1551 
1552 		if (val & (1 << 23))
1553 			mask = FLASH_BACKUP_STRAP_MASK;
1554 		else
1555 			mask = FLASH_STRAP_MASK;
1556 
1557 		/* Look for the matching NVRAM device configuration data. */
1558 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1559 
1560 			/* Check if the device matches any of the known devices. */
1561 			if ((val & mask) == (flash->strapping & mask)) {
1562 				/* Found a device match. */
1563 				sc->bce_flash_info = flash;
1564 
1565 				/* Request access to the flash interface. */
1566 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1567 					return rc;
1568 
1569 				/* Reconfigure the flash interface. */
1570 				bce_enable_nvram_access(sc);
1571 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1572 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1573 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1574 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1575 				bce_disable_nvram_access(sc);
1576 				bce_release_nvram_lock(sc);
1577 
1578 				break;
1579 			}
1580 		}
1581 	}
1582 
1583 	/* Check if a matching device was found. */
1584 	if (j == entry_count) {
1585 		sc->bce_flash_info = NULL;
1586 		BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1587 			__FILE__, __LINE__);
1588 		rc = ENODEV;
1589 	}
1590 
1591 	/* Write the flash config data to the shared memory interface. */
1592 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1593 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1594 	if (val)
1595 		sc->bce_flash_size = val;
1596 	else
1597 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1598 
1599 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1600 		sc->bce_flash_info->total_size);
1601 
1602 	DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1603 
1604 	return rc;
1605 }
1606 
1607 
1608 /****************************************************************************/
1609 /* Read an arbitrary range of data from NVRAM.                              */
1610 /*                                                                          */
1611 /* Prepares the NVRAM interface for access and reads the requested data     */
1612 /* into the supplied buffer.                                                */
1613 /*                                                                          */
1614 /* Returns:                                                                 */
1615 /*   0 on success and the data read, positive value on failure.             */
1616 /****************************************************************************/
1617 static int
1618 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1619 	int buf_size)
1620 {
1621 	int rc = 0;
1622 	u32 cmd_flags, offset32, len32, extra;
1623 
1624 	if (buf_size == 0)
1625 		return 0;
1626 
1627 	/* Request access to the flash interface. */
1628 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1629 		return rc;
1630 
1631 	/* Enable access to flash interface */
1632 	bce_enable_nvram_access(sc);
1633 
1634 	len32 = buf_size;
1635 	offset32 = offset;
1636 	extra = 0;
1637 
1638 	cmd_flags = 0;
1639 
1640 	if (offset32 & 3) {
1641 		u8 buf[4];
1642 		u32 pre_len;
1643 
1644 		offset32 &= ~3;
1645 		pre_len = 4 - (offset & 3);
1646 
1647 		if (pre_len >= len32) {
1648 			pre_len = len32;
1649 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1650 		}
1651 		else {
1652 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1653 		}
1654 
1655 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1656 
1657 		if (rc)
1658 			return rc;
1659 
1660 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1661 
1662 		offset32 += 4;
1663 		ret_buf += pre_len;
1664 		len32 -= pre_len;
1665 	}
1666 
1667 	if (len32 & 3) {
1668 		extra = 4 - (len32 & 3);
1669 		len32 = (len32 + 4) & ~3;
1670 	}
1671 
1672 	if (len32 == 4) {
1673 		u8 buf[4];
1674 
1675 		if (cmd_flags)
1676 			cmd_flags = BCE_NVM_COMMAND_LAST;
1677 		else
1678 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1679 				    BCE_NVM_COMMAND_LAST;
1680 
1681 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1682 
1683 		memcpy(ret_buf, buf, 4 - extra);
1684 	}
1685 	else if (len32 > 0) {
1686 		u8 buf[4];
1687 
1688 		/* Read the first word. */
1689 		if (cmd_flags)
1690 			cmd_flags = 0;
1691 		else
1692 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1693 
1694 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1695 
1696 		/* Advance to the next dword. */
1697 		offset32 += 4;
1698 		ret_buf += 4;
1699 		len32 -= 4;
1700 
1701 		while (len32 > 4 && rc == 0) {
1702 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1703 
1704 			/* Advance to the next dword. */
1705 			offset32 += 4;
1706 			ret_buf += 4;
1707 			len32 -= 4;
1708 		}
1709 
1710 		if (rc)
1711 			return rc;
1712 
1713 		cmd_flags = BCE_NVM_COMMAND_LAST;
1714 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1715 
1716 		memcpy(ret_buf, buf, 4 - extra);
1717 	}
1718 
1719 	/* Disable access to flash interface and release the lock. */
1720 	bce_disable_nvram_access(sc);
1721 	bce_release_nvram_lock(sc);
1722 
1723 	return rc;
1724 }
1725 
1726 
1727 #ifdef BCE_NVRAM_WRITE_SUPPORT
1728 /****************************************************************************/
1729 /* Write an arbitrary range of data from NVRAM.                             */
1730 /*                                                                          */
1731 /* Prepares the NVRAM interface for write access and writes the requested   */
1732 /* data from the supplied buffer.  The caller is responsible for            */
1733 /* calculating any appropriate CRCs.                                        */
1734 /*                                                                          */
1735 /* Returns:                                                                 */
1736 /*   0 on success, positive value on failure.                               */
1737 /****************************************************************************/
1738 static int
1739 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1740 	int buf_size)
1741 {
1742 	u32 written, offset32, len32;
1743 	u8 *buf, start[4], end[4];
1744 	int rc = 0;
1745 	int align_start, align_end;
1746 
1747 	buf = data_buf;
1748 	offset32 = offset;
1749 	len32 = buf_size;
1750 	align_start = align_end = 0;
1751 
1752 	if ((align_start = (offset32 & 3))) {
1753 		offset32 &= ~3;
1754 		len32 += align_start;
1755 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1756 			return rc;
1757 	}
1758 
1759 	if (len32 & 3) {
1760 	       	if ((len32 > 4) || !align_start) {
1761 			align_end = 4 - (len32 & 3);
1762 			len32 += align_end;
1763 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1764 				end, 4))) {
1765 				return rc;
1766 			}
1767 		}
1768 	}
1769 
1770 	if (align_start || align_end) {
1771 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1772 		if (buf == 0)
1773 			return ENOMEM;
1774 		if (align_start) {
1775 			memcpy(buf, start, 4);
1776 		}
1777 		if (align_end) {
1778 			memcpy(buf + len32 - 4, end, 4);
1779 		}
1780 		memcpy(buf + align_start, data_buf, buf_size);
1781 	}
1782 
1783 	written = 0;
1784 	while ((written < len32) && (rc == 0)) {
1785 		u32 page_start, page_end, data_start, data_end;
1786 		u32 addr, cmd_flags;
1787 		int i;
1788 		u8 flash_buffer[264];
1789 
1790 	    /* Find the page_start addr */
1791 		page_start = offset32 + written;
1792 		page_start -= (page_start % sc->bce_flash_info->page_size);
1793 		/* Find the page_end addr */
1794 		page_end = page_start + sc->bce_flash_info->page_size;
1795 		/* Find the data_start addr */
1796 		data_start = (written == 0) ? offset32 : page_start;
1797 		/* Find the data_end addr */
1798 		data_end = (page_end > offset32 + len32) ?
1799 			(offset32 + len32) : page_end;
1800 
1801 		/* Request access to the flash interface. */
1802 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1803 			goto nvram_write_end;
1804 
1805 		/* Enable access to flash interface */
1806 		bce_enable_nvram_access(sc);
1807 
1808 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1809 		if (sc->bce_flash_info->buffered == 0) {
1810 			int j;
1811 
1812 			/* Read the whole page into the buffer
1813 			 * (non-buffer flash only) */
1814 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1815 				if (j == (sc->bce_flash_info->page_size - 4)) {
1816 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1817 				}
1818 				rc = bce_nvram_read_dword(sc,
1819 					page_start + j,
1820 					&flash_buffer[j],
1821 					cmd_flags);
1822 
1823 				if (rc)
1824 					goto nvram_write_end;
1825 
1826 				cmd_flags = 0;
1827 			}
1828 		}
1829 
1830 		/* Enable writes to flash interface (unlock write-protect) */
1831 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1832 			goto nvram_write_end;
1833 
1834 		/* Erase the page */
1835 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1836 			goto nvram_write_end;
1837 
1838 		/* Re-enable the write again for the actual write */
1839 		bce_enable_nvram_write(sc);
1840 
1841 		/* Loop to write back the buffer data from page_start to
1842 		 * data_start */
1843 		i = 0;
1844 		if (sc->bce_flash_info->buffered == 0) {
1845 			for (addr = page_start; addr < data_start;
1846 				addr += 4, i += 4) {
1847 
1848 				rc = bce_nvram_write_dword(sc, addr,
1849 					&flash_buffer[i], cmd_flags);
1850 
1851 				if (rc != 0)
1852 					goto nvram_write_end;
1853 
1854 				cmd_flags = 0;
1855 			}
1856 		}
1857 
1858 		/* Loop to write the new data from data_start to data_end */
1859 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1860 			if ((addr == page_end - 4) ||
1861 				((sc->bce_flash_info->buffered) &&
1862 				 (addr == data_end - 4))) {
1863 
1864 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1865 			}
1866 			rc = bce_nvram_write_dword(sc, addr, buf,
1867 				cmd_flags);
1868 
1869 			if (rc != 0)
1870 				goto nvram_write_end;
1871 
1872 			cmd_flags = 0;
1873 			buf += 4;
1874 		}
1875 
1876 		/* Loop to write back the buffer data from data_end
1877 		 * to page_end */
1878 		if (sc->bce_flash_info->buffered == 0) {
1879 			for (addr = data_end; addr < page_end;
1880 				addr += 4, i += 4) {
1881 
1882 				if (addr == page_end-4) {
1883 					cmd_flags = BCE_NVM_COMMAND_LAST;
1884                 		}
1885 				rc = bce_nvram_write_dword(sc, addr,
1886 					&flash_buffer[i], cmd_flags);
1887 
1888 				if (rc != 0)
1889 					goto nvram_write_end;
1890 
1891 				cmd_flags = 0;
1892 			}
1893 		}
1894 
1895 		/* Disable writes to flash interface (lock write-protect) */
1896 		bce_disable_nvram_write(sc);
1897 
1898 		/* Disable access to flash interface */
1899 		bce_disable_nvram_access(sc);
1900 		bce_release_nvram_lock(sc);
1901 
1902 		/* Increment written */
1903 		written += data_end - data_start;
1904 	}
1905 
1906 nvram_write_end:
1907 	if (align_start || align_end)
1908 		free(buf, M_DEVBUF);
1909 
1910 	return rc;
1911 }
1912 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1913 
1914 
1915 /****************************************************************************/
1916 /* Verifies that NVRAM is accessible and contains valid data.               */
1917 /*                                                                          */
1918 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1919 /* correct.                                                                 */
1920 /*                                                                          */
1921 /* Returns:                                                                 */
1922 /*   0 on success, positive value on failure.                               */
1923 /****************************************************************************/
1924 static int
1925 bce_nvram_test(struct bce_softc *sc)
1926 {
1927 	u32 buf[BCE_NVRAM_SIZE / 4];
1928 	u8 *data = (u8 *) buf;
1929 	int rc = 0;
1930 	u32 magic, csum;
1931 
1932 
1933 	/*
1934 	 * Check that the device NVRAM is valid by reading
1935 	 * the magic value at offset 0.
1936 	 */
1937 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1938 		goto bce_nvram_test_done;
1939 
1940 
1941     magic = bce_be32toh(buf[0]);
1942 	if (magic != BCE_NVRAM_MAGIC) {
1943 		rc = ENODEV;
1944 		BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1945 			"Found: 0x%08X\n",
1946 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1947 		goto bce_nvram_test_done;
1948 	}
1949 
1950 	/*
1951 	 * Verify that the device NVRAM includes valid
1952 	 * configuration data.
1953 	 */
1954 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1955 		goto bce_nvram_test_done;
1956 
1957 	csum = ether_crc32_le(data, 0x100);
1958 	if (csum != BCE_CRC32_RESIDUAL) {
1959 		rc = ENODEV;
1960 		BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1961 			"Expected: 0x%08X, Found: 0x%08X\n",
1962 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1963 		goto bce_nvram_test_done;
1964 	}
1965 
1966 	csum = ether_crc32_le(data + 0x100, 0x100);
1967 	if (csum != BCE_CRC32_RESIDUAL) {
1968 		BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1969 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1970 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1971 		rc = ENODEV;
1972 	}
1973 
1974 bce_nvram_test_done:
1975 	return rc;
1976 }
1977 
1978 
1979 /****************************************************************************/
1980 /* Free any DMA memory owned by the driver.                                 */
1981 /*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
1983 /* the memory if allocated.                                                 */
1984 /*                                                                          */
1985 /* Returns:                                                                 */
1986 /*   Nothing.                                                               */
1987 /****************************************************************************/
1988 static void
1989 bce_dma_free(struct bce_softc *sc)
1990 {
1991 	int i;
1992 
1993 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1994 
1995 	/* Destroy the status block. */
1996 	if (sc->status_block != NULL)
1997 		bus_dmamem_free(
1998 			sc->status_tag,
1999 		    sc->status_block,
2000 		    sc->status_map);
2001 
2002 	if (sc->status_map != NULL) {
2003 		bus_dmamap_unload(
2004 			sc->status_tag,
2005 		    sc->status_map);
2006 		bus_dmamap_destroy(sc->status_tag,
2007 		    sc->status_map);
2008 	}
2009 
2010 	if (sc->status_tag != NULL)
2011 		bus_dma_tag_destroy(sc->status_tag);
2012 
2013 
2014 	/* Destroy the statistics block. */
2015 	if (sc->stats_block != NULL)
2016 		bus_dmamem_free(
2017 			sc->stats_tag,
2018 		    sc->stats_block,
2019 		    sc->stats_map);
2020 
2021 	if (sc->stats_map != NULL) {
2022 		bus_dmamap_unload(
2023 			sc->stats_tag,
2024 		    sc->stats_map);
2025 		bus_dmamap_destroy(sc->stats_tag,
2026 		    sc->stats_map);
2027 	}
2028 
2029 	if (sc->stats_tag != NULL)
2030 		bus_dma_tag_destroy(sc->stats_tag);
2031 
2032 
2033 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2034 	for (i = 0; i < TX_PAGES; i++ ) {
2035 		if (sc->tx_bd_chain[i] != NULL)
2036 			bus_dmamem_free(
2037 				sc->tx_bd_chain_tag,
2038 			    sc->tx_bd_chain[i],
2039 			    sc->tx_bd_chain_map[i]);
2040 
2041 		if (sc->tx_bd_chain_map[i] != NULL) {
2042 			bus_dmamap_unload(
2043 				sc->tx_bd_chain_tag,
2044 		    	sc->tx_bd_chain_map[i]);
2045 			bus_dmamap_destroy(
2046 				sc->tx_bd_chain_tag,
2047 			    sc->tx_bd_chain_map[i]);
2048 		}
2049 
2050 	}
2051 
2052 	/* Destroy the TX buffer descriptor tag. */
2053 	if (sc->tx_bd_chain_tag != NULL)
2054 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2055 
2056 
2057 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2058 	for (i = 0; i < RX_PAGES; i++ ) {
2059 		if (sc->rx_bd_chain[i] != NULL)
2060 			bus_dmamem_free(
2061 				sc->rx_bd_chain_tag,
2062 			    sc->rx_bd_chain[i],
2063 			    sc->rx_bd_chain_map[i]);
2064 
2065 		if (sc->rx_bd_chain_map[i] != NULL) {
2066 			bus_dmamap_unload(
2067 				sc->rx_bd_chain_tag,
2068 		    	sc->rx_bd_chain_map[i]);
2069 			bus_dmamap_destroy(
2070 				sc->rx_bd_chain_tag,
2071 			    sc->rx_bd_chain_map[i]);
2072 		}
2073 	}
2074 
2075 	/* Destroy the RX buffer descriptor tag. */
2076 	if (sc->rx_bd_chain_tag != NULL)
2077 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2078 
2079 
2080 	/* Unload and destroy the TX mbuf maps. */
2081 	for (i = 0; i < TOTAL_TX_BD; i++) {
2082 		if (sc->tx_mbuf_map[i] != NULL) {
2083 			bus_dmamap_unload(sc->tx_mbuf_tag,
2084 				sc->tx_mbuf_map[i]);
2085 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2086 	 			sc->tx_mbuf_map[i]);
2087 		}
2088 	}
2089 
2090 	/* Destroy the TX mbuf tag. */
2091 	if (sc->tx_mbuf_tag != NULL)
2092 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2093 
2094 
2095 	/* Unload and destroy the RX mbuf maps. */
2096 	for (i = 0; i < TOTAL_RX_BD; i++) {
2097 		if (sc->rx_mbuf_map[i] != NULL) {
2098 			bus_dmamap_unload(sc->rx_mbuf_tag,
2099 				sc->rx_mbuf_map[i]);
2100 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2101 	 			sc->rx_mbuf_map[i]);
2102 		}
2103 	}
2104 
2105 	/* Destroy the RX mbuf tag. */
2106 	if (sc->rx_mbuf_tag != NULL)
2107 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2108 
2109 
2110 	/* Destroy the parent tag */
2111 	if (sc->parent_tag != NULL)
2112 		bus_dma_tag_destroy(sc->parent_tag);
2113 
2114 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2115 
2116 }
2117 
2118 
2119 /****************************************************************************/
2120 /* Get DMA memory from the OS.                                              */
2121 /*                                                                          */
2122 /* Validates that the OS has provided DMA buffers in response to a          */
2123 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2124 /* When the callback is used the OS will return 0 for the mapping function  */
2125 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2126 /* failures back to the caller.                                             */
2127 /*                                                                          */
2128 /* Returns:                                                                 */
2129 /*   Nothing.                                                               */
2130 /****************************************************************************/
2131 static void
2132 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2133 {
2134 	struct bce_dmamap_arg *map_arg = arg;
2135 	struct bce_softc *sc = map_arg->sc;
2136 
2137 	/* Simulate a mapping failure. */
2138 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2139 		BCE_PRINTF(sc, "%s(%d): Simulating DMA mapping error.\n",
2140 			__FILE__, __LINE__);
2141 		error = ENOMEM);
2142 
2143 	/* Check for an error and signal the caller that an error occurred. */
2144 	if (error || (nseg > map_arg->maxsegs)) {
2145 		BCE_PRINTF(sc, "%s(%d): DMA mapping error! error = %d, "
2146 		"nseg = %d, maxsegs = %d\n",
2147 			__FILE__, __LINE__, error, nseg, map_arg->maxsegs);
2148 		map_arg->maxsegs = 0;
2149 		goto bce_dma_map_addr_exit;
2150 	}
2151 
2152 	map_arg->busaddr = segs->ds_addr;
2153 
2154 bce_dma_map_addr_exit:
2155 	return;
2156 }
2157 
2158 
2159 /****************************************************************************/
2160 /* Map TX buffers into TX buffer descriptors.                               */
2161 /*                                                                          */
/* Given a series of DMA memory containing an outgoing frame, map the       */
2163 /* segments into the tx_bd structure used by the hardware.                  */
2164 /*                                                                          */
2165 /* Returns:                                                                 */
2166 /*   Nothing.                                                               */
2167 /****************************************************************************/
/* bus_dmamap_load() callback: translate the frame's DMA segments into
 * tx_bd ring entries.  Results (final prod/chain_prod/prod_bseq and the
 * actual segment count) are passed back through *map_arg; maxsegs is
 * set to 0 to signal failure, matching bce_dma_map_addr(). */
static void
bce_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs,
	int nseg, bus_size_t mapsize, int error)
{
	struct bce_dmamap_arg *map_arg;
	struct bce_softc *sc;
	struct tx_bd *txbd = NULL;
	int i = 0;			/* Index into segs[]. */
	u16 prod, chain_prod;
	u32	prod_bseq;
#ifdef BCE_DEBUG
	u16 debug_prod;
#endif

	map_arg = arg;
	sc = map_arg->sc;

	/* Nothing to do if the load itself failed; the ring is untouched. */
	if (error) {
		DBPRINT(sc, BCE_WARN, "%s(): Called with error = %d\n",
			__FUNCTION__, error);
		return;
	}

	/* Signal error to caller if there's too many segments */
	if (nseg > map_arg->maxsegs) {
		DBPRINT(sc, BCE_WARN,
			"%s(): Mapped TX descriptors: max segs = %d, "
			"actual segs = %d\n",
			__FUNCTION__, map_arg->maxsegs, nseg);

		/* maxsegs == 0 is the error flag read by the caller. */
		map_arg->maxsegs = 0;
		return;
	}

	/* prod points to an empty tx_bd at this point. */
	prod       = map_arg->prod;
	chain_prod = map_arg->chain_prod;
	prod_bseq  = map_arg->prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */

	txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

	/* Setup the first tx_bd for the first segment. */
	txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
	txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
	txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
	txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
			TX_BD_FLAGS_START);
	/* prod_bseq accumulates the running byte count for the chip. */
	prod_bseq += segs[i].ds_len;

	/* Setup any remaining segments. */
	for (i = 1; i < nseg; i++) {
		prod       = NEXT_TX_BD(prod);
		chain_prod = TX_CHAIN_IDX(prod);

		txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);

		prod_bseq += segs[i].ds_len;
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nseg));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/* prod points to the last tx_bd at this point. */
	map_arg->maxsegs    = nseg;
	map_arg->prod       = prod;
	map_arg->chain_prod = chain_prod;
	map_arg->prod_bseq  = prod_bseq;
}
2264 
2265 
2266 /****************************************************************************/
2267 /* Allocate any DMA memory needed by the driver.                            */
2268 /*                                                                          */
2269 /* Allocates DMA memory needed for the various global structures needed by  */
2270 /* hardware.                                                                */
2271 /*                                                                          */
2272 /* Returns:                                                                 */
2273 /*   0 for success, positive value for failure.                             */
2274 /****************************************************************************/
2275 static int
2276 bce_dma_alloc(device_t dev)
2277 {
2278 	struct bce_softc *sc;
2279 	int i, error, rc = 0;
2280 	struct bce_dmamap_arg map_arg;
2281 
2282 	sc = device_get_softc(dev);
2283 
2284 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2285 
2286 	/*
2287 	 * Allocate the parent bus DMA tag appropriate for PCI.
2288 	 */
2289 	if (bus_dma_tag_create(NULL,		/* parent     */
2290 			BCE_DMA_ALIGN,				/* alignment  */
2291 			BCE_DMA_BOUNDARY,			/* boundary   */
2292 			sc->max_bus_addr,			/* lowaddr    */
2293 			BUS_SPACE_MAXADDR,			/* highaddr   */
2294 			NULL, 						/* filterfunc */
2295 			NULL,						/* filterarg  */
2296 			MAXBSIZE, 					/* maxsize    */
2297 			BUS_SPACE_UNRESTRICTED,		/* nsegments  */
2298 			BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2299 			0,							/* flags      */
2300 			NULL, 						/* locfunc    */
2301 			NULL,						/* lockarg    */
2302 			&sc->parent_tag)) {
2303 		BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2304 			__FILE__, __LINE__);
2305 		rc = ENOMEM;
2306 		goto bce_dma_alloc_exit;
2307 	}
2308 
2309 	/*
2310 	 * Create a DMA tag for the status block, allocate and clear the
2311 	 * memory, map the memory into DMA space, and fetch the physical
2312 	 * address of the block.
2313 	 */
2314 	if (bus_dma_tag_create(
2315 			sc->parent_tag,			/* parent      */
2316 	    	BCE_DMA_ALIGN,			/* alignment   */
2317 	    	BCE_DMA_BOUNDARY,		/* boundary    */
2318 	    	sc->max_bus_addr,		/* lowaddr     */
2319 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2320 	    	NULL, 					/* filterfunc  */
2321 	    	NULL, 					/* filterarg   */
2322 	    	BCE_STATUS_BLK_SZ, 		/* maxsize     */
2323 	    	1,						/* nsegments   */
2324 	    	BCE_STATUS_BLK_SZ, 		/* maxsegsize  */
2325 	    	0,						/* flags       */
2326 	    	NULL, 					/* lockfunc    */
2327 	    	NULL,					/* lockarg     */
2328 	    	&sc->status_tag)) {
2329 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2330 			__FILE__, __LINE__);
2331 		rc = ENOMEM;
2332 		goto bce_dma_alloc_exit;
2333 	}
2334 
2335 	if(bus_dmamem_alloc(
2336 			sc->status_tag,				/* dmat        */
2337 	    	(void **)&sc->status_block,	/* vaddr       */
2338 	    	BUS_DMA_NOWAIT,					/* flags       */
2339 	    	&sc->status_map)) {
2340 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2341 			__FILE__, __LINE__);
2342 		rc = ENOMEM;
2343 		goto bce_dma_alloc_exit;
2344 	}
2345 
2346 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2347 
2348 	map_arg.sc = sc;
2349 	map_arg.maxsegs = 1;
2350 
2351 	error = bus_dmamap_load(
2352 			sc->status_tag,	   		/* dmat        */
2353 	    	sc->status_map,	   		/* map         */
2354 	    	sc->status_block,	 	/* buf         */
2355 	    	BCE_STATUS_BLK_SZ,	 	/* buflen      */
2356 	    	bce_dma_map_addr, 	 	/* callback    */
2357 	    	&map_arg,			 	/* callbackarg */
2358 	    	BUS_DMA_NOWAIT);		/* flags       */
2359 
2360 	if(error || (map_arg.maxsegs == 0)) {
2361 		BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2362 			__FILE__, __LINE__);
2363 		rc = ENOMEM;
2364 		goto bce_dma_alloc_exit;
2365 	}
2366 
2367 	sc->status_block_paddr = map_arg.busaddr;
2368 	/* DRC - Fix for 64 bit addresses. */
2369 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2370 		(u32) sc->status_block_paddr);
2371 
2372 	/*
2373 	 * Create a DMA tag for the statistics block, allocate and clear the
2374 	 * memory, map the memory into DMA space, and fetch the physical
2375 	 * address of the block.
2376 	 */
2377 	if (bus_dma_tag_create(
2378 			sc->parent_tag,			/* parent      */
2379 	    	BCE_DMA_ALIGN,	 		/* alignment   */
2380 	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2381 	    	sc->max_bus_addr,		/* lowaddr     */
2382 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2383 	    	NULL,		 	  		/* filterfunc  */
2384 	    	NULL, 			  		/* filterarg   */
2385 	    	BCE_STATS_BLK_SZ, 		/* maxsize     */
2386 	    	1,				  		/* nsegments   */
2387 	    	BCE_STATS_BLK_SZ, 		/* maxsegsize  */
2388 	    	0, 				  		/* flags       */
2389 	    	NULL, 			  		/* lockfunc    */
2390 	    	NULL, 			  		/* lockarg     */
2391 	    	&sc->stats_tag)) {
2392 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2393 			__FILE__, __LINE__);
2394 		rc = ENOMEM;
2395 		goto bce_dma_alloc_exit;
2396 	}
2397 
2398 	if (bus_dmamem_alloc(
2399 			sc->stats_tag,				/* dmat        */
2400 	    	(void **)&sc->stats_block,	/* vaddr       */
2401 	    	BUS_DMA_NOWAIT,	 			/* flags       */
2402 	    	&sc->stats_map)) {
2403 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2404 			__FILE__, __LINE__);
2405 		rc = ENOMEM;
2406 		goto bce_dma_alloc_exit;
2407 	}
2408 
2409 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2410 
2411 	map_arg.sc = sc;
2412 	map_arg.maxsegs = 1;
2413 
2414 	error = bus_dmamap_load(
2415 			sc->stats_tag,	 	/* dmat        */
2416 	    	sc->stats_map,	 	/* map         */
2417 	    	sc->stats_block, 	/* buf         */
2418 	    	BCE_STATS_BLK_SZ,	/* buflen      */
2419 	    	bce_dma_map_addr,	/* callback    */
2420 	    	&map_arg, 		 	/* callbackarg */
2421 	    	BUS_DMA_NOWAIT);	/* flags       */
2422 
2423 	if(error || (map_arg.maxsegs == 0)) {
2424 		BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2425 			__FILE__, __LINE__);
2426 		rc = ENOMEM;
2427 		goto bce_dma_alloc_exit;
2428 	}
2429 
2430 	sc->stats_block_paddr = map_arg.busaddr;
2431 	/* DRC - Fix for 64 bit address. */
2432 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2433 		(u32) sc->stats_block_paddr);
2434 
2435 	/*
2436 	 * Create a DMA tag for the TX buffer descriptor chain,
2437 	 * allocate and clear the  memory, and fetch the
2438 	 * physical address of the block.
2439 	 */
2440 	if(bus_dma_tag_create(
2441 			sc->parent_tag,		  /* parent      */
2442 	    	BCM_PAGE_SIZE,		  /* alignment   */
2443 	    	BCE_DMA_BOUNDARY,	  /* boundary    */
2444 			sc->max_bus_addr,	  /* lowaddr     */
2445 			BUS_SPACE_MAXADDR, 	  /* highaddr    */
2446 			NULL, 				  /* filterfunc  */
2447 			NULL, 				  /* filterarg   */
2448 			BCE_TX_CHAIN_PAGE_SZ, /* maxsize     */
2449 			1,			  		  /* nsegments   */
2450 			BCE_TX_CHAIN_PAGE_SZ, /* maxsegsize  */
2451 			0,				 	  /* flags       */
2452 			NULL, 				  /* lockfunc    */
2453 			NULL,				  /* lockarg     */
2454 			&sc->tx_bd_chain_tag)) {
2455 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2456 			__FILE__, __LINE__);
2457 		rc = ENOMEM;
2458 		goto bce_dma_alloc_exit;
2459 	}
2460 
2461 	for (i = 0; i < TX_PAGES; i++) {
2462 
2463 		if(bus_dmamem_alloc(
2464 				sc->tx_bd_chain_tag,			/* tag   */
2465 	    		(void **)&sc->tx_bd_chain[i],	/* vaddr */
2466 	    		BUS_DMA_NOWAIT,					/* flags */
2467 		    	&sc->tx_bd_chain_map[i])) {
2468 			BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2469 				"chain DMA memory!\n", __FILE__, __LINE__);
2470 			rc = ENOMEM;
2471 			goto bce_dma_alloc_exit;
2472 		}
2473 
2474 		map_arg.maxsegs = 1;
2475 		map_arg.sc = sc;
2476 
2477 		error = bus_dmamap_load(
2478 				sc->tx_bd_chain_tag,	 /* dmat        */
2479 	    		sc->tx_bd_chain_map[i],	 /* map         */
2480 	    		sc->tx_bd_chain[i],		 /* buf         */
2481 		    	BCE_TX_CHAIN_PAGE_SZ,  	 /* buflen      */
2482 		    	bce_dma_map_addr, 	   	 /* callback    */
2483 	    		&map_arg, 			   	 /* callbackarg */
2484 	    		BUS_DMA_NOWAIT);	   	 /* flags       */
2485 
2486 		if(error || (map_arg.maxsegs == 0)) {
2487 			BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2488 				__FILE__, __LINE__);
2489 			rc = ENOMEM;
2490 			goto bce_dma_alloc_exit;
2491 		}
2492 
2493 		sc->tx_bd_chain_paddr[i] = map_arg.busaddr;
2494 		/* DRC - Fix for 64 bit systems. */
2495 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2496 			i, (u32) sc->tx_bd_chain_paddr[i]);
2497 	}
2498 
2499 	/* Create a DMA tag for TX mbufs. */
2500 	if (bus_dma_tag_create(
2501 			sc->parent_tag,	 	 	/* parent      */
2502 	    	BCE_DMA_ALIGN,	 		/* alignment   */
2503 	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2504 			sc->max_bus_addr,		/* lowaddr     */
2505 			BUS_SPACE_MAXADDR,		/* highaddr    */
2506 			NULL, 			  		/* filterfunc  */
2507 			NULL, 			  		/* filterarg   */
2508 			MCLBYTES * BCE_MAX_SEGMENTS,	/* maxsize     */
2509 			BCE_MAX_SEGMENTS,  		/* nsegments   */
2510 			MCLBYTES,				/* maxsegsize  */
2511 			0,				 		/* flags       */
2512 			NULL, 			  		/* lockfunc    */
2513 			NULL,			  		/* lockarg     */
2514 	    	&sc->tx_mbuf_tag)) {
2515 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2516 			__FILE__, __LINE__);
2517 		rc = ENOMEM;
2518 		goto bce_dma_alloc_exit;
2519 	}
2520 
2521 	/* Create DMA maps for the TX mbufs clusters. */
2522 	for (i = 0; i < TOTAL_TX_BD; i++) {
2523 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2524 			&sc->tx_mbuf_map[i])) {
2525 			BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2526 				__FILE__, __LINE__);
2527 			rc = ENOMEM;
2528 			goto bce_dma_alloc_exit;
2529 		}
2530 	}
2531 
2532 	/*
2533 	 * Create a DMA tag for the RX buffer descriptor chain,
2534 	 * allocate and clear the  memory, and fetch the physical
2535 	 * address of the blocks.
2536 	 */
2537 	if (bus_dma_tag_create(
2538 			sc->parent_tag,			/* parent      */
2539 	    	BCM_PAGE_SIZE,			/* alignment   */
2540 	    	BCE_DMA_BOUNDARY,		/* boundary    */
2541 			BUS_SPACE_MAXADDR,		/* lowaddr     */
2542 			sc->max_bus_addr,		/* lowaddr     */
2543 			NULL,					/* filter      */
2544 			NULL, 					/* filterarg   */
2545 			BCE_RX_CHAIN_PAGE_SZ,	/* maxsize     */
2546 			1, 						/* nsegments   */
2547 			BCE_RX_CHAIN_PAGE_SZ,	/* maxsegsize  */
2548 			0,				 		/* flags       */
2549 			NULL,					/* lockfunc    */
2550 			NULL,					/* lockarg     */
2551 			&sc->rx_bd_chain_tag)) {
2552 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2553 			__FILE__, __LINE__);
2554 		rc = ENOMEM;
2555 		goto bce_dma_alloc_exit;
2556 	}
2557 
2558 	for (i = 0; i < RX_PAGES; i++) {
2559 
2560 		if (bus_dmamem_alloc(
2561 				sc->rx_bd_chain_tag,			/* tag   */
2562 	    		(void **)&sc->rx_bd_chain[i], 	/* vaddr */
2563 	    		BUS_DMA_NOWAIT,				  	/* flags */
2564 		    	&sc->rx_bd_chain_map[i])) {
2565 			BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2566 				"DMA memory!\n", __FILE__, __LINE__);
2567 			rc = ENOMEM;
2568 			goto bce_dma_alloc_exit;
2569 		}
2570 
2571 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2572 
2573 		map_arg.maxsegs = 1;
2574 		map_arg.sc = sc;
2575 
2576 		error = bus_dmamap_load(
2577 				sc->rx_bd_chain_tag,	/* dmat        */
2578 	    		sc->rx_bd_chain_map[i],	/* map         */
2579 	    		sc->rx_bd_chain[i],		/* buf         */
2580 		    	BCE_RX_CHAIN_PAGE_SZ,  	/* buflen      */
2581 		    	bce_dma_map_addr,	   	/* callback    */
2582 	    		&map_arg,			   	/* callbackarg */
2583 	    		BUS_DMA_NOWAIT);		/* flags       */
2584 
2585 		if(error || (map_arg.maxsegs == 0)) {
2586 			BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2587 				__FILE__, __LINE__);
2588 			rc = ENOMEM;
2589 			goto bce_dma_alloc_exit;
2590 		}
2591 
2592 		sc->rx_bd_chain_paddr[i] = map_arg.busaddr;
2593 		/* DRC - Fix for 64 bit systems. */
2594 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2595 			i, (u32) sc->rx_bd_chain_paddr[i]);
2596 	}
2597 
2598 	/*
2599 	 * Create a DMA tag for RX mbufs.
2600 	 */
2601 	if (bus_dma_tag_create(
2602 			sc->parent_tag,			/* parent      */
2603 	    	BCE_DMA_ALIGN,		  	/* alignment   */
2604 	    	BCE_DMA_BOUNDARY,	  	/* boundary    */
2605 			sc->max_bus_addr,	  	/* lowaddr     */
2606 			BUS_SPACE_MAXADDR, 	  	/* highaddr    */
2607 			NULL, 				  	/* filterfunc  */
2608 			NULL, 				  	/* filterarg   */
2609 			MJUM9BYTES,				/* maxsize     */
2610 			BCE_MAX_SEGMENTS,  		/* nsegments   */
2611 			MJUM9BYTES,				/* maxsegsize  */
2612 			0,				 	  	/* flags       */
2613 			NULL, 				  	/* lockfunc    */
2614 			NULL,				  	/* lockarg     */
2615 	    	&sc->rx_mbuf_tag)) {
2616 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2617 			__FILE__, __LINE__);
2618 		rc = ENOMEM;
2619 		goto bce_dma_alloc_exit;
2620 	}
2621 
2622 	/* Create DMA maps for the RX mbuf clusters. */
2623 	for (i = 0; i < TOTAL_RX_BD; i++) {
2624 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2625 				&sc->rx_mbuf_map[i])) {
2626 			BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2627 				__FILE__, __LINE__);
2628 			rc = ENOMEM;
2629 			goto bce_dma_alloc_exit;
2630 		}
2631 	}
2632 
2633 bce_dma_alloc_exit:
2634 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2635 
2636 	return(rc);
2637 }
2638 
2639 
2640 /****************************************************************************/
2641 /* Release all resources used by the driver.                                */
2642 /*                                                                          */
2643 /* Releases all resources acquired by the driver including interrupts,      */
2644 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2645 /*                                                                          */
2646 /* Returns:                                                                 */
2647 /*   Nothing.                                                               */
2648 /****************************************************************************/
2649 static void
2650 bce_release_resources(struct bce_softc *sc)
2651 {
2652 	device_t dev;
2653 
2654 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2655 
2656 	dev = sc->bce_dev;
2657 
2658 	bce_dma_free(sc);
2659 
2660 	if (sc->bce_intrhand != NULL)
2661 		bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2662 
2663 	if (sc->bce_irq != NULL)
2664 		bus_release_resource(dev,
2665 			SYS_RES_IRQ,
2666 			0,
2667 			sc->bce_irq);
2668 
2669 	if (sc->bce_res != NULL)
2670 		bus_release_resource(dev,
2671 			SYS_RES_MEMORY,
2672 		    PCIR_BAR(0),
2673 		    sc->bce_res);
2674 
2675 	if (sc->bce_ifp != NULL)
2676 		if_free(sc->bce_ifp);
2677 
2678 
2679 	if (mtx_initialized(&sc->bce_mtx))
2680 		BCE_LOCK_DESTROY(sc);
2681 
2682 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2683 
2684 }
2685 
2686 
2687 /****************************************************************************/
2688 /* Firmware synchronization.                                                */
2689 /*                                                                          */
2690 /* Before performing certain events such as a chip reset, synchronize with  */
2691 /* the firmware first.                                                      */
2692 /*                                                                          */
2693 /* Returns:                                                                 */
2694 /*   0 for success, positive value for failure.                             */
2695 /****************************************************************************/
2696 static int
2697 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2698 {
2699 	int i, rc = 0;
2700 	u32 val;
2701 
2702 	/* Don't waste any time if we've timed out before. */
2703 	if (sc->bce_fw_timed_out) {
2704 		rc = EBUSY;
2705 		goto bce_fw_sync_exit;
2706 	}
2707 
2708 	/* Increment the message sequence number. */
2709 	sc->bce_fw_wr_seq++;
2710 	msg_data |= sc->bce_fw_wr_seq;
2711 
2712  	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2713 
2714 	/* Send the message to the bootcode driver mailbox. */
2715 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2716 
2717 	/* Wait for the bootcode to acknowledge the message. */
2718 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2719 		/* Check for a response in the bootcode firmware mailbox. */
2720 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2721 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2722 			break;
2723 		DELAY(1000);
2724 	}
2725 
2726 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2727 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2728 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2729 
2730 		BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2731 			"msg_data = 0x%08X\n",
2732 			__FILE__, __LINE__, msg_data);
2733 
2734 		msg_data &= ~BCE_DRV_MSG_CODE;
2735 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2736 
2737 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2738 
2739 		sc->bce_fw_timed_out = 1;
2740 		rc = EBUSY;
2741 	}
2742 
2743 bce_fw_sync_exit:
2744 	return (rc);
2745 }
2746 
2747 
2748 /****************************************************************************/
2749 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2750 /*                                                                          */
2751 /* Returns:                                                                 */
2752 /*   Nothing.                                                               */
2753 /****************************************************************************/
2754 static void
2755 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2756 	u32 rv2p_code_len, u32 rv2p_proc)
2757 {
2758 	int i;
2759 	u32 val;
2760 
2761 	for (i = 0; i < rv2p_code_len; i += 8) {
2762 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2763 		rv2p_code++;
2764 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2765 		rv2p_code++;
2766 
2767 		if (rv2p_proc == RV2P_PROC1) {
2768 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2769 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2770 		}
2771 		else {
2772 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2773 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2774 		}
2775 	}
2776 
2777 	/* Reset the processor, un-stall is done later. */
2778 	if (rv2p_proc == RV2P_PROC1) {
2779 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2780 	}
2781 	else {
2782 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2783 	}
2784 }
2785 
2786 
2787 /****************************************************************************/
2788 /* Load RISC processor firmware.                                            */
2789 /*                                                                          */
2790 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2791 /* associated with a particular processor.                                  */
2792 /*                                                                          */
2793 /* Returns:                                                                 */
2794 /*   Nothing.                                                               */
2795 /****************************************************************************/
2796 static void
2797 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2798 	struct fw_info *fw)
2799 {
2800 	u32 offset;
2801 	u32 val;
2802 
2803 	/* Halt the CPU. */
2804 	val = REG_RD_IND(sc, cpu_reg->mode);
2805 	val |= cpu_reg->mode_value_halt;
2806 	REG_WR_IND(sc, cpu_reg->mode, val);
2807 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2808 
2809 	/* Load the Text area. */
2810 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2811 	if (fw->text) {
2812 		int j;
2813 
2814 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2815 			REG_WR_IND(sc, offset, fw->text[j]);
2816 	        }
2817 	}
2818 
2819 	/* Load the Data area. */
2820 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2821 	if (fw->data) {
2822 		int j;
2823 
2824 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2825 			REG_WR_IND(sc, offset, fw->data[j]);
2826 		}
2827 	}
2828 
2829 	/* Load the SBSS area. */
2830 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2831 	if (fw->sbss) {
2832 		int j;
2833 
2834 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2835 			REG_WR_IND(sc, offset, fw->sbss[j]);
2836 		}
2837 	}
2838 
2839 	/* Load the BSS area. */
2840 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2841 	if (fw->bss) {
2842 		int j;
2843 
2844 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2845 			REG_WR_IND(sc, offset, fw->bss[j]);
2846 		}
2847 	}
2848 
2849 	/* Load the Read-Only area. */
2850 	offset = cpu_reg->spad_base +
2851 		(fw->rodata_addr - cpu_reg->mips_view_base);
2852 	if (fw->rodata) {
2853 		int j;
2854 
2855 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2856 			REG_WR_IND(sc, offset, fw->rodata[j]);
2857 		}
2858 	}
2859 
2860 	/* Clear the pre-fetch instruction. */
2861 	REG_WR_IND(sc, cpu_reg->inst, 0);
2862 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2863 
2864 	/* Start the CPU. */
2865 	val = REG_RD_IND(sc, cpu_reg->mode);
2866 	val &= ~cpu_reg->mode_value_halt;
2867 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2868 	REG_WR_IND(sc, cpu_reg->mode, val);
2869 }
2870 
2871 
2872 /****************************************************************************/
2873 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2874 /*                                                                          */
2875 /* Loads the firmware for each CPU and starts the CPU.                      */
2876 /*                                                                          */
2877 /* Returns:                                                                 */
2878 /*   Nothing.                                                               */
2879 /****************************************************************************/
2880 static void
2881 bce_init_cpus(struct bce_softc *sc)
2882 {
2883 	struct cpu_reg cpu_reg;
2884 	struct fw_info fw;
2885 
2886 	/* Initialize the RV2P processor. */
2887 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2888 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2889 
2890 	/* Initialize the RX Processor. */
2891 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2892 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2893 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2894 	cpu_reg.state = BCE_RXP_CPU_STATE;
2895 	cpu_reg.state_value_clear = 0xffffff;
2896 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2897 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2898 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2899 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2900 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2901 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2902 	cpu_reg.mips_view_base = 0x8000000;
2903 
2904 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2905 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2906 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2907 	fw.start_addr = bce_RXP_b06FwStartAddr;
2908 
2909 	fw.text_addr = bce_RXP_b06FwTextAddr;
2910 	fw.text_len = bce_RXP_b06FwTextLen;
2911 	fw.text_index = 0;
2912 	fw.text = bce_RXP_b06FwText;
2913 
2914 	fw.data_addr = bce_RXP_b06FwDataAddr;
2915 	fw.data_len = bce_RXP_b06FwDataLen;
2916 	fw.data_index = 0;
2917 	fw.data = bce_RXP_b06FwData;
2918 
2919 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2920 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2921 	fw.sbss_index = 0;
2922 	fw.sbss = bce_RXP_b06FwSbss;
2923 
2924 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2925 	fw.bss_len = bce_RXP_b06FwBssLen;
2926 	fw.bss_index = 0;
2927 	fw.bss = bce_RXP_b06FwBss;
2928 
2929 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2930 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2931 	fw.rodata_index = 0;
2932 	fw.rodata = bce_RXP_b06FwRodata;
2933 
2934 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2935 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2936 
2937 	/* Initialize the TX Processor. */
2938 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2939 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2940 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2941 	cpu_reg.state = BCE_TXP_CPU_STATE;
2942 	cpu_reg.state_value_clear = 0xffffff;
2943 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2944 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2945 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2946 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2947 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2948 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2949 	cpu_reg.mips_view_base = 0x8000000;
2950 
2951 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2952 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2953 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2954 	fw.start_addr = bce_TXP_b06FwStartAddr;
2955 
2956 	fw.text_addr = bce_TXP_b06FwTextAddr;
2957 	fw.text_len = bce_TXP_b06FwTextLen;
2958 	fw.text_index = 0;
2959 	fw.text = bce_TXP_b06FwText;
2960 
2961 	fw.data_addr = bce_TXP_b06FwDataAddr;
2962 	fw.data_len = bce_TXP_b06FwDataLen;
2963 	fw.data_index = 0;
2964 	fw.data = bce_TXP_b06FwData;
2965 
2966 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2967 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2968 	fw.sbss_index = 0;
2969 	fw.sbss = bce_TXP_b06FwSbss;
2970 
2971 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2972 	fw.bss_len = bce_TXP_b06FwBssLen;
2973 	fw.bss_index = 0;
2974 	fw.bss = bce_TXP_b06FwBss;
2975 
2976 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2977 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2978 	fw.rodata_index = 0;
2979 	fw.rodata = bce_TXP_b06FwRodata;
2980 
2981 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2982 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2983 
2984 	/* Initialize the TX Patch-up Processor. */
2985 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2986 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2987 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2988 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2989 	cpu_reg.state_value_clear = 0xffffff;
2990 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2991 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2992 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2993 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2994 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2995 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2996 	cpu_reg.mips_view_base = 0x8000000;
2997 
2998 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2999 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3000 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3001 	fw.start_addr = bce_TPAT_b06FwStartAddr;
3002 
3003 	fw.text_addr = bce_TPAT_b06FwTextAddr;
3004 	fw.text_len = bce_TPAT_b06FwTextLen;
3005 	fw.text_index = 0;
3006 	fw.text = bce_TPAT_b06FwText;
3007 
3008 	fw.data_addr = bce_TPAT_b06FwDataAddr;
3009 	fw.data_len = bce_TPAT_b06FwDataLen;
3010 	fw.data_index = 0;
3011 	fw.data = bce_TPAT_b06FwData;
3012 
3013 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3014 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
3015 	fw.sbss_index = 0;
3016 	fw.sbss = bce_TPAT_b06FwSbss;
3017 
3018 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
3019 	fw.bss_len = bce_TPAT_b06FwBssLen;
3020 	fw.bss_index = 0;
3021 	fw.bss = bce_TPAT_b06FwBss;
3022 
3023 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3024 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
3025 	fw.rodata_index = 0;
3026 	fw.rodata = bce_TPAT_b06FwRodata;
3027 
3028 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3029 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3030 
3031 	/* Initialize the Completion Processor. */
3032 	cpu_reg.mode = BCE_COM_CPU_MODE;
3033 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3034 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3035 	cpu_reg.state = BCE_COM_CPU_STATE;
3036 	cpu_reg.state_value_clear = 0xffffff;
3037 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3038 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3039 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3040 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3041 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3042 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3043 	cpu_reg.mips_view_base = 0x8000000;
3044 
3045 	fw.ver_major = bce_COM_b06FwReleaseMajor;
3046 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
3047 	fw.ver_fix = bce_COM_b06FwReleaseFix;
3048 	fw.start_addr = bce_COM_b06FwStartAddr;
3049 
3050 	fw.text_addr = bce_COM_b06FwTextAddr;
3051 	fw.text_len = bce_COM_b06FwTextLen;
3052 	fw.text_index = 0;
3053 	fw.text = bce_COM_b06FwText;
3054 
3055 	fw.data_addr = bce_COM_b06FwDataAddr;
3056 	fw.data_len = bce_COM_b06FwDataLen;
3057 	fw.data_index = 0;
3058 	fw.data = bce_COM_b06FwData;
3059 
3060 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
3061 	fw.sbss_len = bce_COM_b06FwSbssLen;
3062 	fw.sbss_index = 0;
3063 	fw.sbss = bce_COM_b06FwSbss;
3064 
3065 	fw.bss_addr = bce_COM_b06FwBssAddr;
3066 	fw.bss_len = bce_COM_b06FwBssLen;
3067 	fw.bss_index = 0;
3068 	fw.bss = bce_COM_b06FwBss;
3069 
3070 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
3071 	fw.rodata_len = bce_COM_b06FwRodataLen;
3072 	fw.rodata_index = 0;
3073 	fw.rodata = bce_COM_b06FwRodata;
3074 
3075 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3076 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3077 }
3078 
3079 
3080 /****************************************************************************/
3081 /* Initialize context memory.                                               */
3082 /*                                                                          */
3083 /* Clears the memory associated with each Context ID (CID).                 */
3084 /*                                                                          */
3085 /* Returns:                                                                 */
3086 /*   Nothing.                                                               */
3087 /****************************************************************************/
3088 static void
3089 bce_init_context(struct bce_softc *sc)
3090 {
3091 	u32 vcid;
3092 
3093 	vcid = 96;
3094 	while (vcid) {
3095 		u32 vcid_addr, pcid_addr, offset;
3096 
3097 		vcid--;
3098 
3099    		vcid_addr = GET_CID_ADDR(vcid);
3100 		pcid_addr = vcid_addr;
3101 
3102 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3103 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3104 
3105 		/* Zero out the context. */
3106 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3107 			CTX_WR(sc, 0x00, offset, 0);
3108 		}
3109 
3110 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3111 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3112 	}
3113 }
3114 
3115 
3116 /****************************************************************************/
3117 /* Fetch the permanent MAC address of the controller.                       */
3118 /*                                                                          */
3119 /* Returns:                                                                 */
3120 /*   Nothing.                                                               */
3121 /****************************************************************************/
3122 static void
3123 bce_get_mac_addr(struct bce_softc *sc)
3124 {
3125 	u32 mac_lo = 0, mac_hi = 0;
3126 
3127 	/*
3128 	 * The NetXtreme II bootcode populates various NIC
3129 	 * power-on and runtime configuration items in a
3130 	 * shared memory area.  The factory configured MAC
3131 	 * address is available from both NVRAM and the
3132 	 * shared memory area so we'll read the value from
3133 	 * shared memory for speed.
3134 	 */
3135 
3136 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3137 		BCE_PORT_HW_CFG_MAC_UPPER);
3138 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3139 		BCE_PORT_HW_CFG_MAC_LOWER);
3140 
3141 	if ((mac_lo == 0) && (mac_hi == 0)) {
3142 		BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3143 			__FILE__, __LINE__);
3144 	} else {
3145 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3146 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3147 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3148 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3149 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3150 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3151 	}
3152 
3153 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3154 }
3155 
3156 
3157 /****************************************************************************/
3158 /* Program the MAC address.                                                 */
3159 /*                                                                          */
3160 /* Returns:                                                                 */
3161 /*   Nothing.                                                               */
3162 /****************************************************************************/
3163 static void
3164 bce_set_mac_addr(struct bce_softc *sc)
3165 {
3166 	u32 val;
3167 	u8 *mac_addr = sc->eaddr;
3168 
3169 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3170 
3171 	val = (mac_addr[0] << 8) | mac_addr[1];
3172 
3173 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3174 
3175 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3176 		(mac_addr[4] << 8) | mac_addr[5];
3177 
3178 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3179 }
3180 
3181 
3182 /****************************************************************************/
3183 /* Stop the controller.                                                     */
3184 /*                                                                          */
3185 /* Returns:                                                                 */
3186 /*   Nothing.                                                               */
3187 /****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	mii = device_get_softc(sc->bce_miibus);

	/* Stop the periodic tick callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);	/* read back to flush the write */
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */

	/*
	 * Temporarily force IFF_UP so that the IFM_NONE media change
	 * below takes effect, then restore the original flags.
	 */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	/*
	 * If we are called from bce_detach(), mii is already NULL.
	 */
	if (mii != NULL) {
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		/* Select no media to isolate the PHY... */
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		/* ...then restore the saved media selection. */
		ifm->ifm_media = mtmp;
	}

	ifp->if_flags = itmp;
	ifp->if_timer = 0;

	sc->bce_link = 0;

	/* Mark the interface as no longer running. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	bce_mgmt_init_locked(sc);
}
3252 
3253 
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Synchronizes with the bootcode, issues a core reset through the PCI      */
/* config space MISC_CONFIG register, and waits for the firmware to         */
/* finish its initialization.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BCE_PRINTF(sc, "%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bce_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bce_reset_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3334 
3335 
/****************************************************************************/
/* Initialize the controller hardware after reset.                          */
/*                                                                          */
/* Configures DMA swapping and channels, applies chip errata workarounds,   */
/* initializes context memory and the on-board CPUs, prepares NVRAM, and    */
/* programs the kernel bypass and page size settings.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_chipinit(struct bce_softc *sc)
{
	u32 val;
	int rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* Set bits 20-23 to 0x2 along with the PCI compensation delay bit. */
	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bce_flags & BCE_PCIX_FLAG) {
		u16 val;	/* NOTE: intentionally shadows the outer u32 val */

		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bce_init_context(sc);

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bce_init_nvram(sc)) {
		rc = ENODEV;
		goto bce_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BCE_MQ_CONFIG, val);

	/* Set the kernel bypass window to cover all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processor the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

bce_chipinit_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3425 
3426 
3427 /****************************************************************************/
3428 /* Initialize the controller in preparation to send/receive traffic.        */
3429 /*                                                                          */
3430 /* Returns:                                                                 */
3431 /*   0 for success, positive value for failure.                             */
3432 /****************************************************************************/
3433 static int
3434 bce_blockinit(struct bce_softc *sc)
3435 {
3436 	u32 reg, val;
3437 	int rc = 0;
3438 
3439 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3440 
3441 	/* Load the hardware default MAC address. */
3442 	bce_set_mac_addr(sc);
3443 
3444 	/* Set the Ethernet backoff seed value */
3445 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3446 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3447 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3448 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3449 
3450 	sc->last_status_idx = 0;
3451 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3452 
3453 	/* Set up link change interrupt generation. */
3454 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3455 
3456 	/* Program the physical address of the status block. */
3457 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3458 		BCE_ADDR_LO(sc->status_block_paddr));
3459 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3460 		BCE_ADDR_HI(sc->status_block_paddr));
3461 
3462 	/* Program the physical address of the statistics block. */
3463 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3464 		BCE_ADDR_LO(sc->stats_block_paddr));
3465 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3466 		BCE_ADDR_HI(sc->stats_block_paddr));
3467 
3468 	/* Program various host coalescing parameters. */
3469 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3470 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3471 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3472 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3473 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3474 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3475 	REG_WR(sc, BCE_HC_TX_TICKS,
3476 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3477 	REG_WR(sc, BCE_HC_RX_TICKS,
3478 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3479 	REG_WR(sc, BCE_HC_COM_TICKS,
3480 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3481 	REG_WR(sc, BCE_HC_CMD_TICKS,
3482 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3483 	REG_WR(sc, BCE_HC_STATS_TICKS,
3484 		(sc->bce_stats_ticks & 0xffff00));
3485 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3486 		0xbb8);  /* 3ms */
3487 	REG_WR(sc, BCE_HC_CONFIG,
3488 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3489 		BCE_HC_CONFIG_COLLECT_STATS));
3490 
3491 	/* Clear the internal statistics counters. */
3492 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3493 
3494 	/* Verify that bootcode is running. */
3495 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3496 
3497 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3498 		BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3499 			__FILE__, __LINE__);
3500 		reg = 0);
3501 
3502 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3503 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3504 		BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3505 			"Expected: 08%08X\n", __FILE__, __LINE__,
3506 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3507 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3508 		rc = ENODEV;
3509 		goto bce_blockinit_exit;
3510 	}
3511 
3512 	/* Check if any management firmware is running. */
3513 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3514 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3515 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3516 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3517 	}
3518 
3519 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3520 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3521 
3522 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3523 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3524 
3525 	/* Enable link state change interrupt generation. */
3526 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3527 
3528 	/* Enable all remaining blocks in the MAC. */
3529 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3530 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3531 	DELAY(20);
3532 
3533 bce_blockinit_exit:
3534 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3535 
3536 	return (rc);
3537 }
3538 
3539 
3540 /****************************************************************************/
3541 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3542 /*                                                                          */
3543 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3544 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3545 /* necessary.                                                               */
3546 /*                                                                          */
3547 /* Returns:                                                                 */
3548 /*   0 for success, positive value for failure.                             */
3549 /****************************************************************************/
static int
bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
	u32 *prod_bseq)
{
	bus_dmamap_t		map;
	/* NOTE(review): assumes the RX mbuf DMA tag allows at most 4 segments
	 * per cluster — confirm against the tag creation code. */
	bus_dma_segment_t	segs[4];
	struct mbuf *m_new = NULL;
	struct rx_bd		*rxbd;
	int i, nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	/*
	 * m == NULL means allocate a fresh mbuf cluster; a non-NULL m is an
	 * existing mbuf being recycled back into the chain (see the else
	 * branch below which resets m_data to the cluster start).
	 */
	if (m == NULL) {

		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
				__FILE__, __LINE__);
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
				__FILE__, __LINE__);

			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);
		/* Attach a cluster; M_EXT is only set on success. */
		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
		if (!(m_new->m_flags & M_EXT)) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
				__FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
	} else {
		/* Recycle the caller's mbuf: restore full length and rewind
		 * the data pointer to the start of the external cluster. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	if (error) {
		BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
			__FILE__, __LINE__);

		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_buf_exit;
	}

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n",
			__FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
	*prod_bseq += segs[0].ds_len;

	/* Fill one rx_bd per remaining DMA segment (jumbo frame support). */
	for (i = 1; i < nsegs; i++) {

		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		rxbd->rx_bd_len       = htole32(segs[i].ds_len);
		rxbd->rx_bd_flags     = 0;
		*prod_bseq += segs[i].ds_len;
	}

	/* Mark the final rx_bd of the packet. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter.  The mbuf pointer is stored
	 * at the *last* chain index used (see bce_rx_intr). */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
3685 
3686 
3687 /****************************************************************************/
3688 /* Allocate memory and initialize the TX data structures.                   */
3689 /*                                                                          */
3690 /* Returns:                                                                 */
3691 /*   0 for success, positive value for failure.                             */
3692 /****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod        = 0;
	sc->tx_cons        = 0;
	sc->tx_prod_bseq   = 0;
	sc->used_tx_bd = 0;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The final BD slot on each page holds the next-page pointer. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;	/* Wrap back to the first page. */
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	/*
	 * Initialize the context ID for an L2 TX chain.
	 */
	val = BCE_L2CTX_TYPE_TYPE_L2;
	val |= BCE_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);

	/* NOTE(review): the (8 << 16) field value is undocumented here —
	 * presumably a BD pre-read count; confirm against the datasheet. */
	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3757 
3758 
3759 /****************************************************************************/
3760 /* Free memory and clear the TX data structures.                            */
3761 /*                                                                          */
3762 /* Returns:                                                                 */
3763 /*   Nothing.                                                               */
3764 /****************************************************************************/
3765 static void
3766 bce_free_tx_chain(struct bce_softc *sc)
3767 {
3768 	int i;
3769 
3770 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3771 
3772 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3773 	for (i = 0; i < TOTAL_TX_BD; i++) {
3774 		if (sc->tx_mbuf_ptr[i] != NULL) {
3775 			if (sc->tx_mbuf_map != NULL)
3776 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3777 					BUS_DMASYNC_POSTWRITE);
3778 			m_freem(sc->tx_mbuf_ptr[i]);
3779 			sc->tx_mbuf_ptr[i] = NULL;
3780 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3781 		}
3782 	}
3783 
3784 	/* Clear each TX chain page. */
3785 	for (i = 0; i < TX_PAGES; i++)
3786 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3787 
3788 	/* Check if we lost any mbufs in the process. */
3789 	DBRUNIF((sc->tx_mbuf_alloc),
3790 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3791 			"from tx chain!\n",
3792 			__FILE__, __LINE__, sc->tx_mbuf_alloc));
3793 
3794 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3795 }
3796 
3797 
3798 /****************************************************************************/
3799 /* Allocate memory and initialize the RX data structures.                   */
3800 /*                                                                          */
3801 /* Returns:                                                                 */
3802 /*   0 for success, positive value for failure.                             */
3803 /****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	u16 prod, chain_prod;
	u32 prod_bseq, val;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod        = 0;
	sc->rx_cons        = 0;
	sc->rx_prod_bseq   = 0;
	sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The final BD slot on each page holds the next-page pointer. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;	/* Wrap back to the first page. */
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): the meaning of the (0x02 << 8) field is not evident
	 * from this code — confirm against the controller datasheet. */
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);

	/* Allocate mbuf clusters for the rx_bd chain.  Stops early (with
	 * rc = ENOBUFS) if an allocation fails; the chain is then only
	 * partially filled. */
	prod = prod_bseq = 0;
	while (prod < BCE_RX_SLACK_SPACE) {
		chain_prod = RX_CHAIN_IDX(prod);
		if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
			BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod      = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Make the rx_bd chain pages visible to the device. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(
			sc->rx_bd_chain_tag,
	    	sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3884 
3885 
3886 /****************************************************************************/
3887 /* Free memory and clear the RX data structures.                            */
3888 /*                                                                          */
3889 /* Returns:                                                                 */
3890 /*   Nothing.                                                               */
3891 /****************************************************************************/
3892 static void
3893 bce_free_rx_chain(struct bce_softc *sc)
3894 {
3895 	int i;
3896 
3897 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3898 
3899 	/* Free any mbufs still in the RX mbuf chain. */
3900 	for (i = 0; i < TOTAL_RX_BD; i++) {
3901 		if (sc->rx_mbuf_ptr[i] != NULL) {
3902 			if (sc->rx_mbuf_map[i] != NULL)
3903 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3904 					BUS_DMASYNC_POSTREAD);
3905 			m_freem(sc->rx_mbuf_ptr[i]);
3906 			sc->rx_mbuf_ptr[i] = NULL;
3907 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3908 		}
3909 	}
3910 
3911 	/* Clear each RX chain page. */
3912 	for (i = 0; i < RX_PAGES; i++)
3913 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3914 
3915 	/* Check if we lost any mbufs in the process. */
3916 	DBRUNIF((sc->rx_mbuf_alloc),
3917 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3918 			__FILE__, __LINE__, sc->rx_mbuf_alloc));
3919 
3920 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3921 }
3922 
3923 
3924 /****************************************************************************/
3925 /* Set media options.                                                       */
3926 /*                                                                          */
3927 /* Returns:                                                                 */
3928 /*   0 for success, positive value for failure.                             */
3929 /****************************************************************************/
3930 static int
3931 bce_ifmedia_upd(struct ifnet *ifp)
3932 {
3933 	struct bce_softc *sc;
3934 	struct mii_data *mii;
3935 	struct ifmedia *ifm;
3936 	int rc = 0;
3937 
3938 	sc = ifp->if_softc;
3939 	ifm = &sc->bce_ifmedia;
3940 
3941 	/* DRC - ToDo: Add SerDes support. */
3942 
3943 	mii = device_get_softc(sc->bce_miibus);
3944 	sc->bce_link = 0;
3945 	if (mii->mii_instance) {
3946 		struct mii_softc *miisc;
3947 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3948 		    miisc = LIST_NEXT(miisc, mii_list))
3949 			mii_phy_reset(miisc);
3950 	}
3951 	mii_mediachg(mii);
3952 
3953 	return(rc);
3954 }
3955 
3956 
3957 /****************************************************************************/
3958 /* Reports current media status.                                            */
3959 /*                                                                          */
3960 /* Returns:                                                                 */
3961 /*   Nothing.                                                               */
3962 /****************************************************************************/
3963 static void
3964 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3965 {
3966 	struct bce_softc *sc;
3967 	struct mii_data *mii;
3968 
3969 	sc = ifp->if_softc;
3970 
3971 	BCE_LOCK(sc);
3972 
3973 	mii = device_get_softc(sc->bce_miibus);
3974 
3975 	/* DRC - ToDo: Add SerDes support. */
3976 
3977 	mii_pollstat(mii);
3978 	ifmr->ifm_active = mii->mii_media_active;
3979 	ifmr->ifm_status = mii->mii_media_status;
3980 
3981 	BCE_UNLOCK(sc);
3982 }
3983 
3984 
3985 /****************************************************************************/
3986 /* Handles PHY generated interrupt events.                                  */
3987 /*                                                                          */
3988 /* Returns:                                                                 */
3989 /*   Nothing.                                                               */
3990 /****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	/*
	 * status_attn_bits holds the current link state; the _ack field
	 * holds the last state acknowledged by the driver.  A mismatch
	 * between the two means the link state changed.
	 */
	new_link_state = sc->status_block->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {

		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Clear the cached link state and re-evaluate it now. */
		sc->bce_link = 0;
		callout_stop(&sc->bce_stat_ch);
		bce_tick_locked(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
		}
		else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
		}

	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
4027 
4028 
4029 /****************************************************************************/
4030 /* Handles received frame interrupt events.                                 */
4031 /*                                                                          */
4032 /* Returns:                                                                 */
4033 /*   Nothing.                                                               */
4034 /****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	u32 sw_prod_bseq;
	struct l2_fhdr *l2fhdr;

	DBRUNIF(1, sc->rx_interrupts++);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the chain page pointer entry; it never holds a frame. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
		__FUNCTION__, sw_prod, sw_cons,
		sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u32 status;

		/* Convert the producer/consumer indices to an actual rx_bd index. */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_rxbd(sc, sw_chain_cons, rxbd));

#ifdef DEVICE_POLLING
		/* Honor the polling budget; stop once it is exhausted. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->bce_rxcycles <= 0)
				break;
			sc->bce_rxcycles--;
		}
#endif

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {

			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_chain_cons);
				bce_breakpoint(sc));

			/* DRC - ToDo: If the received packet is small, say less */
			/*             than 128 bytes, allocate a new mbuf here, */
			/*             copy the data to that mbuf, and recycle   */
			/*             the mapped jumbo frame.                   */

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
		    	BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXteme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len    = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
				BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
				status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
				BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
					"Min(%d), Actual(%d), Max(%d)\n",
					__FILE__, __LINE__, (int) BCE_MIN_MTU,
					len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
				bce_dump_mbuf(sc, m);
		 		bce_breakpoint(sc));

			/* Strip the trailing Ethernet FCS from the length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
				L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
				L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {

				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					/* Reuse of an existing cluster cannot fail in practice. */
					panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

				DBRUN(BCE_WARN,
					BCE_PRINTF(sc, "%s(%d): Failed to allocate "
					"new mbuf, incoming frame dropped!\n",
					__FILE__, __LINE__));

				ifp->if_ierrors++;

				/* Try and reuse the exisitng mbuf. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/* Skip over the l2_fhdr when passing the data up the stack. */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
				struct ether_header *eh;
				eh = mtod(m, struct ether_header *);
				BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
					__FUNCTION__, eh->ether_dhost, ":",
					eh->ether_shost, ":", htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {

				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
					else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid IP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
						      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
							| CSUM_PSEUDO_HDR);
					} else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}


			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
					__FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
#if __FreeBSD_version < 700000
				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
#else
				m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
				m->m_flags |= M_VLANTAG;
#endif
			}

			/* Pass the mbuf off to the upper layers.  The lock is
			 * dropped around if_input() so the stack can run. */
			ifp->if_ipackets++;
			DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
				__FUNCTION__);
			BCE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);
			BCE_LOCK(sc);

bce_rx_int_next_rx:
			/* The producer only advances when a BD was refilled. */
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Hand the refilled rx_bd chain pages back to the device. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

	/* Commit the working copies of the indices back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Notify the chip of the new producer index and byte sequence. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4308 
4309 
4310 /****************************************************************************/
4311 /* Handles transmit completion interrupt events.                            */
4312 /*                                                                          */
4313 /* Returns:                                                                 */
4314 /*   Nothing.                                                               */
4315 /****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	/* The TX chain, consumer indices, and used_tx_bd are lock-protected. */
	BCE_LOCK_ASSERT(sc);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		/* Convert the 16-bit ring index into a chain page/offset index. */
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
				" 0x%04X > 0x%04X\n",
				__FILE__, __LINE__, sw_tx_chain_cons,
				(int) MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1,
			txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				[TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
				"txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
				BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			/* Clear the slot so bce_start_locked() sees it as free. */
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		/* One more tx_bd has been reclaimed from the hardware. */
		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
			BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n",
				__FUNCTION__, sc->used_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}
4427 
4428 
4429 /****************************************************************************/
4430 /* Disables interrupt generation.                                           */
4431 /*                                                                          */
4432 /* Returns:                                                                 */
4433 /*   Nothing.                                                               */
4434 /****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Mask interrupt generation in the PCI config interrupt-ack register. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted write to the device. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
}
4442 
4443 
4444 /****************************************************************************/
4445 /* Enables interrupt generation.                                            */
4446 /*                                                                          */
4447 /* Returns:                                                                 */
4448 /*   Nothing.                                                               */
4449 /****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	u32 val;

	/*
	 * Two-step ack sequence: write the last seen status index with
	 * interrupts still masked, then write it again with the mask
	 * cleared.  NOTE(review): presumably the first write avoids a
	 * spurious-interrupt window -- confirm against the chip docs.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/*
	 * COAL_NOW presumably forces an immediate host-coalescing pass so
	 * any events that arrived while masked generate an interrupt now.
	 */
	val = REG_RD(sc, BCE_HC_COMMAND);
	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
}
4465 
4466 
4467 /****************************************************************************/
4468 /* Handles controller initialization.                                       */
4469 /*                                                                          */
4470 /* Must be called from a locked routine.                                    */
4471 /*                                                                          */
4472 /* Returns:                                                                 */
4473 /*   Nothing.                                                               */
4474 /****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	/* Quiesce the hardware before reprogramming it. */
	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
		ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu |
			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/* NOTE(review): the "+ 2" and "+ 8" look like an alignment pad and
	 * trailer allowance for the hardware-prepended l2_fhdr -- confirm. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	/* (When DEVICE_POLLING is defined, this call is the body of the
	 * "else" branch above -- keep it a single statement.) */
	bce_enable_intr(sc);

	/* Renegotiate/report the link state. */
	bce_ifmedia_upd(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic statistics/link tick. */
	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);

bce_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4578 
/* Minimal bring-up of the receive path so management firmware
 * (ASF/IPMI/UMP) traffic can flow while the OS interface is down. */
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	u32 val;
	struct ifnet *ifp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_mgmt_init_locked_exit;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Program the RV2P block with the host page size (encoded in the
	 * top byte).  NOTE(review): field layout inferred from the shift --
	 * confirm against the register definition. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Read back to flush the posted write, then give the blocks a
	 * moment to start. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
bce_mgmt_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4615 
4616 
4617 /****************************************************************************/
4618 /* Handles controller initialization when called from an unlocked routine.  */
4619 /*                                                                          */
4620 /* Returns:                                                                 */
4621 /*   Nothing.                                                               */
4622 /****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc;

	sc = (struct bce_softc *)xsc;

	/* Take the driver lock and defer to the locked initializer. */
	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);
}
4632 
4633 
4634 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4636 /* memory visible to the controller.                                        */
4637 /*                                                                          */
4638 /* Returns:                                                                 */
4639 /*   0 for success, positive value for failure.                             */
4640 /****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf *m_head, u16 *prod,
	u16 *chain_prod, u32 *prod_bseq)
{
	u32 vlan_tag_flags = 0;
	struct bce_dmamap_arg map_arg;
	bus_dmamap_t map;
	int i, error, rc = 0;

	/* Transfer any checksum offload flags to the bd. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m_head->m_flags & M_VLANTAG)
		vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
			(m_head->m_pkthdr.ether_vtag << 16));

	/* Map the mbuf into DMAable memory. */
	/* Collect the state the busdma callback needs; bce_dma_map_tx_desc()
	 * presumably fills the tx_bd entries and advances prod/chain_prod/
	 * prod_bseq/maxsegs inside map_arg -- see that routine. */
	map = sc->tx_mbuf_map[*chain_prod];
	map_arg.sc         = sc;
	map_arg.prod       = *prod;
	map_arg.chain_prod = *chain_prod;
	map_arg.prod_bseq  = *prod_bseq;
	map_arg.tx_flags   = vlan_tag_flags;
	/* Cap the segment count at the free bd's minus reserved slack. */
	map_arg.maxsegs    = USABLE_TX_BD - sc->used_tx_bd -
		BCE_TX_SLACK_SPACE;

	KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));

	for (i = 0; i < TX_PAGES; i++)
		map_arg.tx_chain[i] = sc->tx_bd_chain[i];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m_head,
	    bce_dma_map_tx_desc, &map_arg, BUS_DMA_NOWAIT);

	if (error || map_arg.maxsegs == 0) {

            /* Try to defrag the mbuf if there are too many segments. */
            if (error == EFBIG && map_arg.maxsegs != 0) {
                struct mbuf *m0;

	        DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
                    __FUNCTION__, map_arg.maxsegs);

                /* m_defrag() leaves the original chain intact on failure. */
                m0 = m_defrag(m_head, M_DONTWAIT);
                if (m0 != NULL) {
                    m_head = m0;
                    error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag,
                        map, m_head, bce_dma_map_tx_desc, &map_arg,
                        BUS_DMA_NOWAIT);
                }
            }

            /* Still getting an error after a defrag. */
            if (error) {
                BCE_PRINTF(sc,
                    "%s(%d): Error mapping mbuf into TX chain!\n",
                    __FILE__, __LINE__);
                rc = ENOBUFS;
                goto bce_tx_encap_exit;
            }

	}

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * delete the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_map[*chain_prod] =
		sc->tx_mbuf_map[map_arg.chain_prod];
	sc->tx_mbuf_map[map_arg.chain_prod] = map;
	sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
	sc->used_tx_bd += map_arg.maxsegs;

	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, *chain_prod,
		map_arg.maxsegs));

	/* prod still points the last used tx_bd at this point. */
	*prod       = map_arg.prod;
	*chain_prod = map_arg.chain_prod;
	*prod_bseq  = map_arg.prod_bseq;

bce_tx_encap_exit:

	return(rc);
}
4743 
4744 
4745 /****************************************************************************/
4746 /* Main transmit routine when called from another routine with a lock.      */
4747 /*                                                                          */
4748 /* Returns:                                                                 */
4749 /*   Nothing.                                                               */
4750 /****************************************************************************/
4751 static void
4752 bce_start_locked(struct ifnet *ifp)
4753 {
4754 	struct bce_softc *sc = ifp->if_softc;
4755 	struct mbuf *m_head = NULL;
4756 	int count = 0;
4757 	u16 tx_prod, tx_chain_prod;
4758 	u32	tx_prod_bseq;
4759 
4760 	/* If there's no link or the transmit queue is empty then just exit. */
4761 	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
4762 		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
4763 			__FUNCTION__);
4764 		goto bce_start_locked_exit;
4765 	}
4766 
4767 	/* prod points to the next free tx_bd. */
4768 	tx_prod = sc->tx_prod;
4769 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4770 	tx_prod_bseq = sc->tx_prod_bseq;
4771 
4772 	DBPRINT(sc, BCE_INFO_SEND,
4773 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4774 		"tx_prod_bseq = 0x%08X\n",
4775 		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4776 
4777 	/* Keep adding entries while there is space in the ring. */
4778 	while(sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {
4779 
4780 		/* Check for any frames to send. */
4781 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4782 		if (m_head == NULL)
4783 			break;
4784 
4785 		/*
4786 		 * Pack the data into the transmit ring. If we
4787 		 * don't have room, place the mbuf back at the
4788 		 * head of the queue and set the OACTIVE flag
4789 		 * to wait for the NIC to drain the chain.
4790 		 */
4791 		if (bce_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) {
4792 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4793 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4794 			DBPRINT(sc, BCE_INFO_SEND,
4795 				"TX chain is closed for business! Total tx_bd used = %d\n",
4796 				sc->used_tx_bd);
4797 			break;
4798 		}
4799 
4800 		count++;
4801 
4802 		/* Send a copy of the frame to any BPF listeners. */
4803 		BPF_MTAP(ifp, m_head);
4804 
4805 		tx_prod = NEXT_TX_BD(tx_prod);
4806 		tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4807 	}
4808 
4809 	if (count == 0) {
4810 		/* no packets were dequeued */
4811 		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
4812 			__FUNCTION__);
4813 		goto bce_start_locked_exit;
4814 	}
4815 
4816 	/* Update the driver's counters. */
4817 	sc->tx_prod      = tx_prod;
4818 	sc->tx_prod_bseq = tx_prod_bseq;
4819 
4820 	DBPRINT(sc, BCE_INFO_SEND,
4821 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4822 		"tx_prod_bseq = 0x%08X\n",
4823 		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4824 
4825 	/* Start the transmit. */
4826 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4827 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4828 
4829 	/* Set the tx timeout. */
4830 	ifp->if_timer = BCE_TX_TIMEOUT;
4831 
4832 bce_start_locked_exit:
4833 	return;
4834 }
4835 
4836 
4837 /****************************************************************************/
4838 /* Main transmit routine when called from another routine without a lock.   */
4839 /*                                                                          */
4840 /* Returns:                                                                 */
4841 /*   Nothing.                                                               */
4842 /****************************************************************************/
4843 static void
4844 bce_start(struct ifnet *ifp)
4845 {
4846 	struct bce_softc *sc = ifp->if_softc;
4847 
4848 	BCE_LOCK(sc);
4849 	bce_start_locked(ifp);
4850 	BCE_UNLOCK(sc);
4851 }
4852 
4853 
4854 /****************************************************************************/
4855 /* Handles any IOCTL calls from the operating system.                       */
4856 /*                                                                          */
4857 /* Returns:                                                                 */
4858 /*   0 for success, positive value for failure.                             */
4859 /****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int mask, error = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	switch(command) {

		/* Set the MTU. */
		case SIOCSIFMTU:
			/* Check that the MTU setting is supported. */
			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
				error = EINVAL;
				break;
			}

			DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);

			/* Clearing RUNNING forces bce_init_locked() to do a
			 * full reinit so the new MTU is programmed. */
			BCE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bce_init_locked(sc);
			BCE_UNLOCK(sc);
			break;

		/* Set interface. */
		case SIOCSIFFLAGS:
			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");

			BCE_LOCK(sc);

			/* Check if the interface is up. */
			if (ifp->if_flags & IFF_UP) {
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					/* Change the promiscuous/multicast flags as necessary. */
					bce_set_rx_mode(sc);
				} else {
					/* Start the HW */
					bce_init_locked(sc);
				}
			} else {
				/* The interface is down.  Check if the driver is running. */
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					bce_stop(sc);
				}
			}

			BCE_UNLOCK(sc);
			error = 0;

			break;

		/* Add/Delete multicast address */
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");

			/* Reprogram the hardware multicast filter. */
			BCE_LOCK(sc);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bce_set_rx_mode(sc);
				error = 0;
			}
			BCE_UNLOCK(sc);

			break;

		/* Set/Get Interface media */
		case SIOCSIFMEDIA:
		case SIOCGIFMEDIA:
			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");

			DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
				sc->bce_phy_flags);

			/* SerDes PHYs are handled by the driver's own ifmedia;
			 * copper PHYs go through the MII layer. */
			if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
				DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");

				error = ifmedia_ioctl(ifp, ifr,
				    &sc->bce_ifmedia, command);
			} else {
				DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
				mii = device_get_softc(sc->bce_miibus);
				error = ifmedia_ioctl(ifp, ifr,
				    &mii->mii_media, command);
			}
			break;

		/* Set interface capability */
		case SIOCSIFCAP:
			/* mask holds only the capability bits that changed. */
			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
			DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);

#ifdef DEVICE_POLLING
			if (mask & IFCAP_POLLING) {
				if (ifr->ifr_reqcap & IFCAP_POLLING) {

					/* Setup the poll routine to call. */
					error = ether_poll_register(bce_poll, ifp);
					if (error) {
						BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
							__FILE__, __LINE__);
						goto bce_ioctl_exit;
					}

					/* Clear the interrupt. */
					BCE_LOCK(sc);
					bce_disable_intr(sc);

					/* Use minimal coalescing (count of 1) while polling. */
					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
						(1 << 16) | sc->bce_rx_quick_cons_trip);
					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
						(1 << 16) | sc->bce_tx_quick_cons_trip);

					ifp->if_capenable |= IFCAP_POLLING;
					BCE_UNLOCK(sc);
				} else {
					/* Clear the poll routine. */
					error = ether_poll_deregister(ifp);

					/* Enable interrupt even in error case */
					BCE_LOCK(sc);
					bce_enable_intr(sc);

					/* Restore the interrupt-mode coalescing parameters. */
					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
						(sc->bce_tx_quick_cons_trip_int << 16) |
						sc->bce_tx_quick_cons_trip);
					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
						(sc->bce_rx_quick_cons_trip_int << 16) |
						sc->bce_rx_quick_cons_trip);

					ifp->if_capenable &= ~IFCAP_POLLING;
					BCE_UNLOCK(sc);
				}
			}
#endif /*DEVICE_POLLING */

			/* Toggle the TX checksum capabilites enable flag. */
			if (mask & IFCAP_TXCSUM) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist = BCE_IF_HWASSIST;
				else
					ifp->if_hwassist = 0;
			}

			/* Toggle the RX checksum capabilities enable flag. */
			/* NOTE(review): toggling RXCSUM also rewrites
			 * if_hwassist, which normally reflects TX offload
			 * only -- verify this cross-coupling is intended. */
			if (mask & IFCAP_RXCSUM) {
				ifp->if_capenable ^= IFCAP_RXCSUM;
				if (IFCAP_RXCSUM & ifp->if_capenable)
					ifp->if_hwassist = BCE_IF_HWASSIST;
				else
					ifp->if_hwassist = 0;
			}

			/* Toggle VLAN_MTU capabilities enable flag. */
			if (mask & IFCAP_VLAN_MTU) {
				BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
					__FILE__, __LINE__);
			}

			/* Toggle VLANHWTAG capabilities enabled flag. */
			if (mask & IFCAP_VLAN_HWTAGGING) {
				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
					BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
						"management firmware (ASF/IPMI/UMP) is running!\n",
						__FILE__, __LINE__);
				else
					BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
						__FILE__, __LINE__);
			}

			break;
		default:
			DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
				(u32) command);

			/* We don't know how to handle the IOCTL, pass it on. */
			error = ether_ioctl(ifp, command, data);
			break;
	}

#ifdef DEVICE_POLLING
bce_ioctl_exit:
#endif

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(error);
}
5054 
5055 
5056 /****************************************************************************/
5057 /* Transmit timeout handler.                                                */
5058 /*                                                                          */
5059 /* Returns:                                                                 */
5060 /*   Nothing.                                                               */
5061 /****************************************************************************/
5062 static void
5063 bce_watchdog(struct ifnet *ifp)
5064 {
5065 	struct bce_softc *sc = ifp->if_softc;
5066 
5067 	DBRUN(BCE_WARN_SEND,
5068 		bce_dump_driver_state(sc);
5069 		bce_dump_status_block(sc));
5070 
5071 	BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n",
5072 		__FILE__, __LINE__);
5073 
5074 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5075 
5076 	BCE_LOCK(sc);
5077 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5078 
5079 	bce_init_locked(sc);
5080 	ifp->if_oerrors++;
5081 	BCE_UNLOCK(sc);
5082 
5083 }
5084 
5085 
5086 #ifdef DEVICE_POLLING
5087 static void
5088 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5089 {
5090 	struct bce_softc *sc = ifp->if_softc;
5091 
5092 	BCE_LOCK_ASSERT(sc);
5093 
5094 	sc->bce_rxcycles = count;
5095 
5096 	bus_dmamap_sync(sc->status_tag, sc->status_map,
5097 	    BUS_DMASYNC_POSTWRITE);
5098 
5099 	/* Check for any completed RX frames. */
5100 	if (sc->status_block->status_rx_quick_consumer_index0 !=
5101 		sc->hw_rx_cons)
5102 		bce_rx_intr(sc);
5103 
5104 	/* Check for any completed TX frames. */
5105 	if (sc->status_block->status_tx_quick_consumer_index0 !=
5106 		sc->hw_tx_cons)
5107 		bce_tx_intr(sc);
5108 
5109 	/* Check for new frames to transmit. */
5110 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5111 		bce_start_locked(ifp);
5112 
5113 }
5114 
5115 
5116 static void
5117 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5118 {
5119 	struct bce_softc *sc = ifp->if_softc;
5120 
5121 	BCE_LOCK(sc);
5122 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5123 		bce_poll_locked(ifp, cmd, count);
5124 	BCE_UNLOCK(sc);
5125 }
5126 #endif /* DEVICE_POLLING */
5127 
5128 
5129 #if 0
5130 static inline int
5131 bce_has_work(struct bce_softc *sc)
5132 {
5133 	struct status_block *stat = sc->status_block;
5134 
5135 	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5136 	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5137 		return 1;
5138 
5139 	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5140 	    bp->link_up)
5141 		return 1;
5142 
5143 	return 0;
5144 }
5145 #endif
5146 
5147 
5148 /*
5149  * Interrupt handler.
5150  */
5151 /****************************************************************************/
5152 /* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine for handling the various     */
5154 /* interrupt causes (PHY, TX, RX).                                          */
5155 /*                                                                          */
5156 /* Returns:                                                                 */
5157 /*   0 for success, positive value for failure.                             */
5158 /****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;

	sc = xsc;
	ifp = sc->bce_ifp;

	BCE_LOCK(sc);

	DBRUNIF(1, sc->interrupts_generated++);

#ifdef DEVICE_POLLING
	/* When polling is active the poll routine does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
		goto bce_intr_exit;
	}
#endif

	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF(sc, "Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(1, sc->unexpected_attentions++);

			BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUN(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Reinitialize the controller to recover. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		/* NOTE: bce_rx_intr() briefly drops the driver lock around
		 * if_input(), so other paths may run in the middle. */
		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag,	sc->status_map,
	    BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	/* Same two-step sequence as bce_enable_intr(): publish the index
	 * with the mask set, then clear the mask. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}
5269 
5270 
5271 /****************************************************************************/
5272 /* Programs the various packet receive modes (broadcast and multicast).     */
5273 /*                                                                          */
5274 /* Returns:                                                                 */
5275 /*   Nothing.                                                               */
5276 /****************************************************************************/
5277 static void
5278 bce_set_rx_mode(struct bce_softc *sc)
5279 {
5280 	struct ifnet *ifp;
5281 	struct ifmultiaddr *ifma;
5282 	u32 hashes[4] = { 0, 0, 0, 0 };
5283 	u32 rx_mode, sort_mode;
5284 	int h, i;
5285 
5286 	BCE_LOCK_ASSERT(sc);
5287 
5288 	ifp = sc->bce_ifp;
5289 
5290 	/* Initialize receive mode default settings. */
5291 	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5292 			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5293 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5294 
5295 	/*
5296 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5297 	 * be enbled.
5298 	 */
5299 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5300 		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5301 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5302 
5303 	/*
5304 	 * Check for promiscuous, all multicast, or selected
5305 	 * multicast address filtering.
5306 	 */
5307 	if (ifp->if_flags & IFF_PROMISC) {
5308 		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5309 
5310 		/* Enable promiscuous mode. */
5311 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5312 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5313 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5314 		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5315 
5316 		/* Enable all multicast addresses. */
5317 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5318 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5319        	}
5320 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5321 	} else {
5322 		/* Accept one or more multicast(s). */
5323 		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
5324 
5325 		IF_ADDR_LOCK(ifp);
5326 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5327 			if (ifma->ifma_addr->sa_family != AF_LINK)
5328 				continue;
5329 			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5330 		    	ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
5331 			hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
5332 		}
5333 		IF_ADDR_UNLOCK(ifp);
5334 
5335 		for (i = 0; i < 4; i++)
5336 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5337 
5338 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5339 	}
5340 
5341 	/* Only make changes if the recive mode has actually changed. */
5342 	if (rx_mode != sc->rx_mode) {
5343 		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5344 			rx_mode);
5345 
5346 		sc->rx_mode = rx_mode;
5347 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5348 	}
5349 
5350 	/* Disable and clear the exisitng sort before enabling a new sort. */
5351 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5352 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5353 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5354 }
5355 
5356 
5357 /****************************************************************************/
5358 /* Called periodically to updates statistics from the controllers           */
5359 /* statistics block.                                                        */
5360 /*                                                                          */
5361 /* Returns:                                                                 */
5362 /*   Nothing.                                                               */
5363 /****************************************************************************/
5364 static void
5365 bce_stats_update(struct bce_softc *sc)
5366 {
5367 	struct ifnet *ifp;
5368 	struct statistics_block *stats;
5369 
5370 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5371 
5372 	ifp = sc->bce_ifp;
5373 
5374 	stats = (struct statistics_block *) sc->stats_block;
5375 
5376 	/*
5377 	 * Update the interface statistics from the
5378 	 * hardware statistics.
5379 	 */
5380 	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5381 
5382 	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5383 				      (u_long) stats->stat_EtherStatsOverrsizePkts +
5384 					  (u_long) stats->stat_IfInMBUFDiscards +
5385 					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
5386 					  (u_long) stats->stat_Dot3StatsFCSErrors;
5387 
5388 	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5389 					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5390 					  (u_long) stats->stat_Dot3StatsLateCollisions;
5391 
5392 	/*
5393 	 * Certain controllers don't report
5394 	 * carrier sense errors correctly.
5395 	 * See errata E11_5708CA0_1165.
5396 	 */
5397 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5398 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
5399 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5400 
5401 	/*
5402 	 * Update the sysctl statistics from the
5403 	 * hardware statistics.
5404 	 */
5405 	sc->stat_IfHCInOctets =
5406 		((u64) stats->stat_IfHCInOctets_hi << 32) +
5407 		 (u64) stats->stat_IfHCInOctets_lo;
5408 
5409 	sc->stat_IfHCInBadOctets =
5410 		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
5411 		 (u64) stats->stat_IfHCInBadOctets_lo;
5412 
5413 	sc->stat_IfHCOutOctets =
5414 		((u64) stats->stat_IfHCOutOctets_hi << 32) +
5415 		 (u64) stats->stat_IfHCOutOctets_lo;
5416 
5417 	sc->stat_IfHCOutBadOctets =
5418 		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
5419 		 (u64) stats->stat_IfHCOutBadOctets_lo;
5420 
5421 	sc->stat_IfHCInUcastPkts =
5422 		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
5423 		 (u64) stats->stat_IfHCInUcastPkts_lo;
5424 
5425 	sc->stat_IfHCInMulticastPkts =
5426 		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
5427 		 (u64) stats->stat_IfHCInMulticastPkts_lo;
5428 
5429 	sc->stat_IfHCInBroadcastPkts =
5430 		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5431 		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
5432 
5433 	sc->stat_IfHCOutUcastPkts =
5434 		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
5435 		 (u64) stats->stat_IfHCOutUcastPkts_lo;
5436 
5437 	sc->stat_IfHCOutMulticastPkts =
5438 		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5439 		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
5440 
5441 	sc->stat_IfHCOutBroadcastPkts =
5442 		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5443 		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
5444 
5445 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5446 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5447 
5448 	sc->stat_Dot3StatsCarrierSenseErrors =
5449 		stats->stat_Dot3StatsCarrierSenseErrors;
5450 
5451 	sc->stat_Dot3StatsFCSErrors =
5452 		stats->stat_Dot3StatsFCSErrors;
5453 
5454 	sc->stat_Dot3StatsAlignmentErrors =
5455 		stats->stat_Dot3StatsAlignmentErrors;
5456 
5457 	sc->stat_Dot3StatsSingleCollisionFrames =
5458 		stats->stat_Dot3StatsSingleCollisionFrames;
5459 
5460 	sc->stat_Dot3StatsMultipleCollisionFrames =
5461 		stats->stat_Dot3StatsMultipleCollisionFrames;
5462 
5463 	sc->stat_Dot3StatsDeferredTransmissions =
5464 		stats->stat_Dot3StatsDeferredTransmissions;
5465 
5466 	sc->stat_Dot3StatsExcessiveCollisions =
5467 		stats->stat_Dot3StatsExcessiveCollisions;
5468 
5469 	sc->stat_Dot3StatsLateCollisions =
5470 		stats->stat_Dot3StatsLateCollisions;
5471 
5472 	sc->stat_EtherStatsCollisions =
5473 		stats->stat_EtherStatsCollisions;
5474 
5475 	sc->stat_EtherStatsFragments =
5476 		stats->stat_EtherStatsFragments;
5477 
5478 	sc->stat_EtherStatsJabbers =
5479 		stats->stat_EtherStatsJabbers;
5480 
5481 	sc->stat_EtherStatsUndersizePkts =
5482 		stats->stat_EtherStatsUndersizePkts;
5483 
5484 	sc->stat_EtherStatsOverrsizePkts =
5485 		stats->stat_EtherStatsOverrsizePkts;
5486 
5487 	sc->stat_EtherStatsPktsRx64Octets =
5488 		stats->stat_EtherStatsPktsRx64Octets;
5489 
5490 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5491 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5492 
5493 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5494 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5495 
5496 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5497 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5498 
5499 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5500 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5501 
5502 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5503 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5504 
5505 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5506 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5507 
5508 	sc->stat_EtherStatsPktsTx64Octets =
5509 		stats->stat_EtherStatsPktsTx64Octets;
5510 
5511 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5512 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5513 
5514 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5515 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5516 
5517 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5518 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5519 
5520 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5521 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5522 
5523 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5524 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5525 
5526 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5527 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5528 
5529 	sc->stat_XonPauseFramesReceived =
5530 		stats->stat_XonPauseFramesReceived;
5531 
5532 	sc->stat_XoffPauseFramesReceived =
5533 		stats->stat_XoffPauseFramesReceived;
5534 
5535 	sc->stat_OutXonSent =
5536 		stats->stat_OutXonSent;
5537 
5538 	sc->stat_OutXoffSent =
5539 		stats->stat_OutXoffSent;
5540 
5541 	sc->stat_FlowControlDone =
5542 		stats->stat_FlowControlDone;
5543 
5544 	sc->stat_MacControlFramesReceived =
5545 		stats->stat_MacControlFramesReceived;
5546 
5547 	sc->stat_XoffStateEntered =
5548 		stats->stat_XoffStateEntered;
5549 
5550 	sc->stat_IfInFramesL2FilterDiscards =
5551 		stats->stat_IfInFramesL2FilterDiscards;
5552 
5553 	sc->stat_IfInRuleCheckerDiscards =
5554 		stats->stat_IfInRuleCheckerDiscards;
5555 
5556 	sc->stat_IfInFTQDiscards =
5557 		stats->stat_IfInFTQDiscards;
5558 
5559 	sc->stat_IfInMBUFDiscards =
5560 		stats->stat_IfInMBUFDiscards;
5561 
5562 	sc->stat_IfInRuleCheckerP4Hit =
5563 		stats->stat_IfInRuleCheckerP4Hit;
5564 
5565 	sc->stat_CatchupInRuleCheckerDiscards =
5566 		stats->stat_CatchupInRuleCheckerDiscards;
5567 
5568 	sc->stat_CatchupInFTQDiscards =
5569 		stats->stat_CatchupInFTQDiscards;
5570 
5571 	sc->stat_CatchupInMBUFDiscards =
5572 		stats->stat_CatchupInMBUFDiscards;
5573 
5574 	sc->stat_CatchupInRuleCheckerP4Hit =
5575 		stats->stat_CatchupInRuleCheckerP4Hit;
5576 
5577 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5578 }
5579 
5580 
5581 static void
5582 bce_tick_locked(struct bce_softc *sc)
5583 {
5584 	struct mii_data *mii = NULL;
5585 	struct ifnet *ifp;
5586 	u32 msg;
5587 
5588 	ifp = sc->bce_ifp;
5589 
5590 	BCE_LOCK_ASSERT(sc);
5591 
5592 	/* Tell the firmware that the driver is still running. */
5593 #ifdef BCE_DEBUG
5594 	msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5595 #else
5596 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5597 #endif
5598 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5599 
5600 	/* Update the statistics from the hardware statistics block. */
5601 	bce_stats_update(sc);
5602 
5603 	/* Schedule the next tick. */
5604 	callout_reset(
5605 		&sc->bce_stat_ch,		/* callout */
5606 		hz, 					/* ticks */
5607 		bce_tick, 				/* function */
5608 		sc);					/* function argument */
5609 
5610 	/* If link is up already up then we're done. */
5611 	if (sc->bce_link)
5612 		goto bce_tick_locked_exit;
5613 
5614 	/* DRC - ToDo: Add SerDes support and check SerDes link here. */
5615 
5616 	mii = device_get_softc(sc->bce_miibus);
5617 	mii_tick(mii);
5618 
5619 	/* Check if the link has come up. */
5620 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5621 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5622 		sc->bce_link++;
5623 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5624 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5625 		    bootverbose)
5626 			BCE_PRINTF(sc, "Gigabit link up\n");
5627 		/* Now that link is up, handle any outstanding TX traffic. */
5628 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5629 			bce_start_locked(ifp);
5630 	}
5631 
5632 bce_tick_locked_exit:
5633 	return;
5634 }
5635 
5636 
/* Periodic timer entry point; takes the driver lock and runs the tick. */
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;

	BCE_LOCK(sc);
	bce_tick_locked(sc);
	BCE_UNLOCK(sc);
}
5648 
5649 
5650 #ifdef BCE_DEBUG
5651 /****************************************************************************/
5652 /* Allows the driver state to be dumped through the sysctl interface.       */
5653 /*                                                                          */
5654 /* Returns:                                                                 */
5655 /*   0 for success, positive value for failure.                             */
5656 /****************************************************************************/
5657 static int
5658 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5659 {
5660         int error;
5661         int result;
5662         struct bce_softc *sc;
5663 
5664         result = -1;
5665         error = sysctl_handle_int(oidp, &result, 0, req);
5666 
5667         if (error || !req->newptr)
5668                 return (error);
5669 
5670         if (result == 1) {
5671                 sc = (struct bce_softc *)arg1;
5672                 bce_dump_driver_state(sc);
5673         }
5674 
5675         return error;
5676 }
5677 
5678 
5679 /****************************************************************************/
5680 /* Allows the hardware state to be dumped through the sysctl interface.     */
5681 /*                                                                          */
5682 /* Returns:                                                                 */
5683 /*   0 for success, positive value for failure.                             */
5684 /****************************************************************************/
5685 static int
5686 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5687 {
5688         int error;
5689         int result;
5690         struct bce_softc *sc;
5691 
5692         result = -1;
5693         error = sysctl_handle_int(oidp, &result, 0, req);
5694 
5695         if (error || !req->newptr)
5696                 return (error);
5697 
5698         if (result == 1) {
5699                 sc = (struct bce_softc *)arg1;
5700                 bce_dump_hw_state(sc);
5701         }
5702 
5703         return error;
5704 }
5705 
5706 
5707 /****************************************************************************/
5708 /*                                                                          */
5709 /*                                                                          */
5710 /* Returns:                                                                 */
5711 /*   0 for success, positive value for failure.                             */
5712 /****************************************************************************/
5713 static int
5714 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5715 {
5716         int error;
5717         int result;
5718         struct bce_softc *sc;
5719 
5720         result = -1;
5721         error = sysctl_handle_int(oidp, &result, 0, req);
5722 
5723         if (error || !req->newptr)
5724                 return (error);
5725 
5726         if (result == 1) {
5727                 sc = (struct bce_softc *)arg1;
5728                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5729         }
5730 
5731         return error;
5732 }
5733 
5734 
5735 /****************************************************************************/
5736 /*                                                                          */
5737 /*                                                                          */
5738 /* Returns:                                                                 */
5739 /*   0 for success, positive value for failure.                             */
5740 /****************************************************************************/
5741 static int
5742 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5743 {
5744         int error;
5745         int result;
5746         struct bce_softc *sc;
5747 
5748         result = -1;
5749         error = sysctl_handle_int(oidp, &result, 0, req);
5750 
5751         if (error || !req->newptr)
5752                 return (error);
5753 
5754         if (result == 1) {
5755                 sc = (struct bce_softc *)arg1;
5756                 bce_breakpoint(sc);
5757         }
5758 
5759         return error;
5760 }
5761 #endif
5762 
5763 
5764 /****************************************************************************/
5765 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5766 /*                                                                          */
5767 /* Returns:                                                                 */
5768 /*   0 for success, positive value for failure.                             */
5769 /****************************************************************************/
5770 static void
5771 bce_add_sysctls(struct bce_softc *sc)
5772 {
5773 	struct sysctl_ctx_list *ctx;
5774 	struct sysctl_oid_list *children;
5775 
5776 	ctx = device_get_sysctl_ctx(sc->bce_dev);
5777 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5778 
5779 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5780 		"driver_version",
5781 		CTLFLAG_RD, &bce_driver_version,
5782 		0, "bce driver version");
5783 
5784 #ifdef BCE_DEBUG
5785 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5786 		"rx_low_watermark",
5787 		CTLFLAG_RD, &sc->rx_low_watermark,
5788 		0, "Lowest level of free rx_bd's");
5789 
5790 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5791 		"tx_hi_watermark",
5792 		CTLFLAG_RD, &sc->tx_hi_watermark,
5793 		0, "Highest level of used tx_bd's");
5794 
5795 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5796 		"l2fhdr_status_errors",
5797 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5798 		0, "l2_fhdr status errors");
5799 
5800 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5801 		"unexpected_attentions",
5802 		CTLFLAG_RD, &sc->unexpected_attentions,
5803 		0, "unexpected attentions");
5804 
5805 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5806 		"lost_status_block_updates",
5807 		CTLFLAG_RD, &sc->lost_status_block_updates,
5808 		0, "lost status block updates");
5809 
5810 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5811 		"mbuf_alloc_failed",
5812 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5813 		0, "mbuf cluster allocation failures");
5814 #endif
5815 
5816 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5817 		"stat_IfHcInOctets",
5818 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5819 		"Bytes received");
5820 
5821 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5822 		"stat_IfHCInBadOctets",
5823 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5824 		"Bad bytes received");
5825 
5826 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5827 		"stat_IfHCOutOctets",
5828 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5829 		"Bytes sent");
5830 
5831 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5832 		"stat_IfHCOutBadOctets",
5833 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5834 		"Bad bytes sent");
5835 
5836 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5837 		"stat_IfHCInUcastPkts",
5838 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5839 		"Unicast packets received");
5840 
5841 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5842 		"stat_IfHCInMulticastPkts",
5843 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5844 		"Multicast packets received");
5845 
5846 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5847 		"stat_IfHCInBroadcastPkts",
5848 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5849 		"Broadcast packets received");
5850 
5851 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5852 		"stat_IfHCOutUcastPkts",
5853 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5854 		"Unicast packets sent");
5855 
5856 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5857 		"stat_IfHCOutMulticastPkts",
5858 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5859 		"Multicast packets sent");
5860 
5861 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5862 		"stat_IfHCOutBroadcastPkts",
5863 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5864 		"Broadcast packets sent");
5865 
5866 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5867 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5868 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5869 		0, "Internal MAC transmit errors");
5870 
5871 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5872 		"stat_Dot3StatsCarrierSenseErrors",
5873 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5874 		0, "Carrier sense errors");
5875 
5876 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5877 		"stat_Dot3StatsFCSErrors",
5878 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5879 		0, "Frame check sequence errors");
5880 
5881 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5882 		"stat_Dot3StatsAlignmentErrors",
5883 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5884 		0, "Alignment errors");
5885 
5886 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5887 		"stat_Dot3StatsSingleCollisionFrames",
5888 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5889 		0, "Single Collision Frames");
5890 
5891 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5892 		"stat_Dot3StatsMultipleCollisionFrames",
5893 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5894 		0, "Multiple Collision Frames");
5895 
5896 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5897 		"stat_Dot3StatsDeferredTransmissions",
5898 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5899 		0, "Deferred Transmissions");
5900 
5901 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5902 		"stat_Dot3StatsExcessiveCollisions",
5903 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5904 		0, "Excessive Collisions");
5905 
5906 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5907 		"stat_Dot3StatsLateCollisions",
5908 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5909 		0, "Late Collisions");
5910 
5911 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5912 		"stat_EtherStatsCollisions",
5913 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5914 		0, "Collisions");
5915 
5916 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5917 		"stat_EtherStatsFragments",
5918 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5919 		0, "Fragments");
5920 
5921 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5922 		"stat_EtherStatsJabbers",
5923 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5924 		0, "Jabbers");
5925 
5926 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5927 		"stat_EtherStatsUndersizePkts",
5928 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5929 		0, "Undersize packets");
5930 
5931 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5932 		"stat_EtherStatsOverrsizePkts",
5933 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5934 		0, "stat_EtherStatsOverrsizePkts");
5935 
5936 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5937 		"stat_EtherStatsPktsRx64Octets",
5938 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5939 		0, "Bytes received in 64 byte packets");
5940 
5941 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5942 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5943 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5944 		0, "Bytes received in 65 to 127 byte packets");
5945 
5946 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5947 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5948 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5949 		0, "Bytes received in 128 to 255 byte packets");
5950 
5951 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5952 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5953 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5954 		0, "Bytes received in 256 to 511 byte packets");
5955 
5956 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5957 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5958 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5959 		0, "Bytes received in 512 to 1023 byte packets");
5960 
5961 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5962 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5963 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5964 		0, "Bytes received in 1024 t0 1522 byte packets");
5965 
5966 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5967 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5968 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5969 		0, "Bytes received in 1523 to 9022 byte packets");
5970 
5971 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5972 		"stat_EtherStatsPktsTx64Octets",
5973 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5974 		0, "Bytes sent in 64 byte packets");
5975 
5976 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5977 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5978 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5979 		0, "Bytes sent in 65 to 127 byte packets");
5980 
5981 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5982 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5983 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5984 		0, "Bytes sent in 128 to 255 byte packets");
5985 
5986 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5987 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5988 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5989 		0, "Bytes sent in 256 to 511 byte packets");
5990 
5991 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5992 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5993 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5994 		0, "Bytes sent in 512 to 1023 byte packets");
5995 
5996 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5997 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5998 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5999 		0, "Bytes sent in 1024 to 1522 byte packets");
6000 
6001 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6002 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6003 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6004 		0, "Bytes sent in 1523 to 9022 byte packets");
6005 
6006 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6007 		"stat_XonPauseFramesReceived",
6008 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6009 		0, "XON pause frames receved");
6010 
6011 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6012 		"stat_XoffPauseFramesReceived",
6013 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6014 		0, "XOFF pause frames received");
6015 
6016 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6017 		"stat_OutXonSent",
6018 		CTLFLAG_RD, &sc->stat_OutXonSent,
6019 		0, "XON pause frames sent");
6020 
6021 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6022 		"stat_OutXoffSent",
6023 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6024 		0, "XOFF pause frames sent");
6025 
6026 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6027 		"stat_FlowControlDone",
6028 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6029 		0, "Flow control done");
6030 
6031 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6032 		"stat_MacControlFramesReceived",
6033 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6034 		0, "MAC control frames received");
6035 
6036 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6037 		"stat_XoffStateEntered",
6038 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6039 		0, "XOFF state entered");
6040 
6041 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6042 		"stat_IfInFramesL2FilterDiscards",
6043 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6044 		0, "Received L2 packets discarded");
6045 
6046 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6047 		"stat_IfInRuleCheckerDiscards",
6048 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6049 		0, "Received packets discarded by rule");
6050 
6051 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6052 		"stat_IfInFTQDiscards",
6053 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6054 		0, "Received packet FTQ discards");
6055 
6056 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6057 		"stat_IfInMBUFDiscards",
6058 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6059 		0, "Received packets discarded due to lack of controller buffer memory");
6060 
6061 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6062 		"stat_IfInRuleCheckerP4Hit",
6063 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6064 		0, "Received packets rule checker hits");
6065 
6066 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6067 		"stat_CatchupInRuleCheckerDiscards",
6068 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6069 		0, "Received packets discarded in Catchup path");
6070 
6071 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6072 		"stat_CatchupInFTQDiscards",
6073 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6074 		0, "Received packets discarded in FTQ in Catchup path");
6075 
6076 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6077 		"stat_CatchupInMBUFDiscards",
6078 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6079 		0, "Received packets discarded in controller buffer memory in Catchup path");
6080 
6081 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6082 		"stat_CatchupInRuleCheckerP4Hit",
6083 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6084 		0, "Received packets rule checker hits in Catchup path");
6085 
6086 #ifdef BCE_DEBUG
6087 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6088 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6089 		(void *)sc, 0,
6090 		bce_sysctl_driver_state, "I", "Drive state information");
6091 
6092 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6093 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
6094 		(void *)sc, 0,
6095 		bce_sysctl_hw_state, "I", "Hardware state information");
6096 
6097 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6098 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6099 		(void *)sc, 0,
6100 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6101 
6102 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6103 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6104 		(void *)sc, 0,
6105 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
6106 #endif
6107 
6108 }
6109 
6110 
6111 /****************************************************************************/
6112 /* BCE Debug Routines                                                       */
6113 /****************************************************************************/
6114 #ifdef BCE_DEBUG
6115 
6116 /****************************************************************************/
6117 /* Prints out information about an mbuf.                                    */
6118 /*                                                                          */
6119 /* Returns:                                                                 */
6120 /*   Nothing.                                                               */
6121 /****************************************************************************/
6122 static void
6123 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6124 {
6125 	u32 val_hi, val_lo;
6126 	struct mbuf *mp = m;
6127 
6128 	if (m == NULL) {
6129 		/* Index out of range. */
6130 		printf("mbuf ptr is null!\n");
6131 		return;
6132 	}
6133 
6134 	while (mp) {
6135 		val_hi = BCE_ADDR_HI(mp);
6136 		val_lo = BCE_ADDR_LO(mp);
6137 		BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
6138 			   val_hi, val_lo, mp->m_len);
6139 
6140 		if (mp->m_flags & M_EXT)
6141 			printf("M_EXT ");
6142 		if (mp->m_flags & M_PKTHDR)
6143 			printf("M_PKTHDR ");
6144 		printf("\n");
6145 
6146 		if (mp->m_flags & M_EXT) {
6147 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6148 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6149 			BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
6150 				val_hi, val_lo, mp->m_ext.ext_size);
6151 		}
6152 
6153 		mp = mp->m_next;
6154 	}
6155 
6156 
6157 }
6158 
6159 
6160 /****************************************************************************/
6161 /* Prints out the mbufs in the TX mbuf chain.                               */
6162 /*                                                                          */
6163 /* Returns:                                                                 */
6164 /*   Nothing.                                                               */
6165 /****************************************************************************/
6166 static void
6167 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6168 {
6169 	struct mbuf *m;
6170 
6171 	BCE_PRINTF(sc,
6172 		"----------------------------"
6173 		"  tx mbuf data  "
6174 		"----------------------------\n");
6175 
6176 	for (int i = 0; i < count; i++) {
6177 	 	m = sc->tx_mbuf_ptr[chain_prod];
6178 		BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6179 		bce_dump_mbuf(sc, m);
6180 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6181 	}
6182 
6183 	BCE_PRINTF(sc,
6184 		"----------------------------"
6185 		"----------------"
6186 		"----------------------------\n");
6187 }
6188 
6189 
6190 /*
6191  * This routine prints the RX mbuf chain.
6192  */
6193 static void
6194 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6195 {
6196 	struct mbuf *m;
6197 
6198 	BCE_PRINTF(sc,
6199 		"----------------------------"
6200 		"  rx mbuf data  "
6201 		"----------------------------\n");
6202 
6203 	for (int i = 0; i < count; i++) {
6204 	 	m = sc->rx_mbuf_ptr[chain_prod];
6205 		BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6206 		bce_dump_mbuf(sc, m);
6207 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6208 	}
6209 
6210 
6211 	BCE_PRINTF(sc,
6212 		"----------------------------"
6213 		"----------------"
6214 		"----------------------------\n");
6215 }
6216 
6217 
/****************************************************************************/
/* Prints a single tx_bd entry.                                             */
/*                                                                          */
/* Distinguishes between an out-of-range index, the last entry on a page    */
/* (which is a chain page pointer), and a normal buffer descriptor.         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
{
	if (idx > MAX_TX_BD)
		/* Index out of range. */
		BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		/* TX Chain page pointer. */
		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
	else
		/* Normal tx_bd entry. */
		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
}
6235 
6236 
/****************************************************************************/
/* Prints a single rx_bd entry.                                             */
/*                                                                          */
/* Distinguishes between an out-of-range index, the last entry on a page    */
/* (which is a chain page pointer), and a normal buffer descriptor.         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX Chain page pointer. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
6254 
6255 
/****************************************************************************/
/* Prints an l2_fhdr (L2 frame header) structure.                           */
/*                                                                          */
/* Shows the status word, packet length, VLAN tag, and the IP and TCP/UDP   */
/* checksums for the frame at the given index.                              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
		"tcp_udp_xsum = 0x%04X\n", idx,
		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
		l2fhdr->l2_fhdr_tcp_udp_xsum);
}
6266 
6267 
6268 /*
6269  * This routine prints the TX chain.
6270  */
6271 static void
6272 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6273 {
6274 	struct tx_bd *txbd;
6275 
6276 	/* First some info about the tx_bd chain structure. */
6277 	BCE_PRINTF(sc,
6278 		"----------------------------"
6279 		"  tx_bd  chain  "
6280 		"----------------------------\n");
6281 
6282 	BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6283 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6284 
6285 	BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6286 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6287 
6288 	BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6289 
6290 	BCE_PRINTF(sc, ""
6291 		"-----------------------------"
6292 		"   tx_bd data   "
6293 		"-----------------------------\n");
6294 
6295 	/* Now print out the tx_bd's themselves. */
6296 	for (int i = 0; i < count; i++) {
6297 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6298 		bce_dump_txbd(sc, tx_prod, txbd);
6299 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6300 	}
6301 
6302 	BCE_PRINTF(sc,
6303 		"-----------------------------"
6304 		"--------------"
6305 		"-----------------------------\n");
6306 }
6307 
6308 
6309 /*
6310  * This routine prints the RX chain.
6311  */
6312 static void
6313 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6314 {
6315 	struct rx_bd *rxbd;
6316 
6317 	/* First some info about the tx_bd chain structure. */
6318 	BCE_PRINTF(sc,
6319 		"----------------------------"
6320 		"  rx_bd  chain  "
6321 		"----------------------------\n");
6322 
6323 	BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6324 
6325 	BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6326 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6327 
6328 	BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6329 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6330 
6331 	BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6332 
6333 	BCE_PRINTF(sc,
6334 		"----------------------------"
6335 		"   rx_bd data   "
6336 		"----------------------------\n");
6337 
6338 	/* Now print out the rx_bd's themselves. */
6339 	for (int i = 0; i < count; i++) {
6340 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6341 		bce_dump_rxbd(sc, rx_prod, rxbd);
6342 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6343 	}
6344 
6345 	BCE_PRINTF(sc,
6346 		"----------------------------"
6347 		"--------------"
6348 		"----------------------------\n");
6349 }
6350 
6351 
6352 /*
6353  * This routine prints the status block.
6354  */
6355 static void
6356 bce_dump_status_block(struct bce_softc *sc)
6357 {
6358 	struct status_block *sblk;
6359 
6360 	sblk = sc->status_block;
6361 
6362    	BCE_PRINTF(sc, "----------------------------- Status Block "
6363 		"-----------------------------\n");
6364 
6365 	BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6366 		sblk->status_attn_bits, sblk->status_attn_bits_ack,
6367 		sblk->status_idx);
6368 
6369 	BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
6370 		sblk->status_rx_quick_consumer_index0,
6371 		sblk->status_tx_quick_consumer_index0);
6372 
6373 	BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6374 
6375 	/* Theses indices are not used for normal L2 drivers. */
6376 	if (sblk->status_rx_quick_consumer_index1 ||
6377 		sblk->status_tx_quick_consumer_index1)
6378 		BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
6379 			sblk->status_rx_quick_consumer_index1,
6380 			sblk->status_tx_quick_consumer_index1);
6381 
6382 	if (sblk->status_rx_quick_consumer_index2 ||
6383 		sblk->status_tx_quick_consumer_index2)
6384 		BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
6385 			sblk->status_rx_quick_consumer_index2,
6386 			sblk->status_tx_quick_consumer_index2);
6387 
6388 	if (sblk->status_rx_quick_consumer_index3 ||
6389 		sblk->status_tx_quick_consumer_index3)
6390 		BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
6391 			sblk->status_rx_quick_consumer_index3,
6392 			sblk->status_tx_quick_consumer_index3);
6393 
6394 	if (sblk->status_rx_quick_consumer_index4 ||
6395 		sblk->status_rx_quick_consumer_index5)
6396 		BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6397 			sblk->status_rx_quick_consumer_index4,
6398 			sblk->status_rx_quick_consumer_index5);
6399 
6400 	if (sblk->status_rx_quick_consumer_index6 ||
6401 		sblk->status_rx_quick_consumer_index7)
6402 		BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6403 			sblk->status_rx_quick_consumer_index6,
6404 			sblk->status_rx_quick_consumer_index7);
6405 
6406 	if (sblk->status_rx_quick_consumer_index8 ||
6407 		sblk->status_rx_quick_consumer_index9)
6408 		BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6409 			sblk->status_rx_quick_consumer_index8,
6410 			sblk->status_rx_quick_consumer_index9);
6411 
6412 	if (sblk->status_rx_quick_consumer_index10 ||
6413 		sblk->status_rx_quick_consumer_index11)
6414 		BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6415 			sblk->status_rx_quick_consumer_index10,
6416 			sblk->status_rx_quick_consumer_index11);
6417 
6418 	if (sblk->status_rx_quick_consumer_index12 ||
6419 		sblk->status_rx_quick_consumer_index13)
6420 		BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6421 			sblk->status_rx_quick_consumer_index12,
6422 			sblk->status_rx_quick_consumer_index13);
6423 
6424 	if (sblk->status_rx_quick_consumer_index14 ||
6425 		sblk->status_rx_quick_consumer_index15)
6426 		BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6427 			sblk->status_rx_quick_consumer_index14,
6428 			sblk->status_rx_quick_consumer_index15);
6429 
6430 	if (sblk->status_completion_producer_index ||
6431 		sblk->status_cmd_consumer_index)
6432 		BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6433 			sblk->status_completion_producer_index,
6434 			sblk->status_cmd_consumer_index);
6435 
6436 	BCE_PRINTF(sc, "-------------------------------------------"
6437 		"-----------------------------\n");
6438 }
6439 
6440 
6441 /*
6442  * This routine prints the statistics block.
6443  */
6444 static void
6445 bce_dump_stats_block(struct bce_softc *sc)
6446 {
6447 	struct statistics_block *sblk;
6448 
6449 	sblk = sc->stats_block;
6450 
6451 	BCE_PRINTF(sc, ""
6452 		"-----------------------------"
6453 		" Stats  Block "
6454 		"-----------------------------\n");
6455 
6456 	BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
6457 		"IfHcInBadOctets      = 0x%08X:%08X\n",
6458 		sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6459 		sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6460 
6461 	BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
6462 		"IfHcOutBadOctets     = 0x%08X:%08X\n",
6463 		sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6464 		sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6465 
6466 	BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
6467 		"IfHcInMulticastPkts  = 0x%08X:%08X\n",
6468 		sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6469 		sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);
6470 
6471 	BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
6472 		"IfHcOutUcastPkts     = 0x%08X:%08X\n",
6473 		sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
6474 		sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);
6475 
6476 	BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6477 		sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
6478 		sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);
6479 
6480 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6481 		BCE_PRINTF(sc, "0x%08X : "
6482 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6483 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6484 
6485 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
6486 		BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6487 			sblk->stat_Dot3StatsCarrierSenseErrors);
6488 
6489 	if (sblk->stat_Dot3StatsFCSErrors)
6490 		BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6491 			sblk->stat_Dot3StatsFCSErrors);
6492 
6493 	if (sblk->stat_Dot3StatsAlignmentErrors)
6494 		BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6495 			sblk->stat_Dot3StatsAlignmentErrors);
6496 
6497 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
6498 		BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6499 			sblk->stat_Dot3StatsSingleCollisionFrames);
6500 
6501 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6502 		BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6503 			sblk->stat_Dot3StatsMultipleCollisionFrames);
6504 
6505 	if (sblk->stat_Dot3StatsDeferredTransmissions)
6506 		BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6507 			sblk->stat_Dot3StatsDeferredTransmissions);
6508 
6509 	if (sblk->stat_Dot3StatsExcessiveCollisions)
6510 		BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6511 			sblk->stat_Dot3StatsExcessiveCollisions);
6512 
6513 	if (sblk->stat_Dot3StatsLateCollisions)
6514 		BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6515 			sblk->stat_Dot3StatsLateCollisions);
6516 
6517 	if (sblk->stat_EtherStatsCollisions)
6518 		BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6519 			sblk->stat_EtherStatsCollisions);
6520 
6521 	if (sblk->stat_EtherStatsFragments)
6522 		BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6523 			sblk->stat_EtherStatsFragments);
6524 
6525 	if (sblk->stat_EtherStatsJabbers)
6526 		BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6527 			sblk->stat_EtherStatsJabbers);
6528 
6529 	if (sblk->stat_EtherStatsUndersizePkts)
6530 		BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6531 			sblk->stat_EtherStatsUndersizePkts);
6532 
6533 	if (sblk->stat_EtherStatsOverrsizePkts)
6534 		BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6535 			sblk->stat_EtherStatsOverrsizePkts);
6536 
6537 	if (sblk->stat_EtherStatsPktsRx64Octets)
6538 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6539 			sblk->stat_EtherStatsPktsRx64Octets);
6540 
6541 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6542 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6543 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6544 
6545 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6546 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
6547 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6548 
6549 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6550 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
6551 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6552 
6553 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6554 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
6555 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6556 
6557 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6558 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
6559 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6560 
6561 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6562 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
6563 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6564 
6565 	if (sblk->stat_EtherStatsPktsTx64Octets)
6566 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6567 			sblk->stat_EtherStatsPktsTx64Octets);
6568 
6569 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6570 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6571 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6572 
6573 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6574 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
6575 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6576 
6577 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6578 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
6579 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6580 
6581 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6582 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
6583 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6584 
6585 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6586 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
6587 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6588 
6589 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6590 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
6591 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6592 
6593 	if (sblk->stat_XonPauseFramesReceived)
6594 		BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6595 			sblk->stat_XonPauseFramesReceived);
6596 
6597 	if (sblk->stat_XoffPauseFramesReceived)
6598 	   BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6599 			sblk->stat_XoffPauseFramesReceived);
6600 
6601 	if (sblk->stat_OutXonSent)
6602 		BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
6603 			sblk->stat_OutXonSent);
6604 
6605 	if (sblk->stat_OutXoffSent)
6606 		BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
6607 			sblk->stat_OutXoffSent);
6608 
6609 	if (sblk->stat_FlowControlDone)
6610 		BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
6611 			sblk->stat_FlowControlDone);
6612 
6613 	if (sblk->stat_MacControlFramesReceived)
6614 		BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6615 			sblk->stat_MacControlFramesReceived);
6616 
6617 	if (sblk->stat_XoffStateEntered)
6618 		BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6619 			sblk->stat_XoffStateEntered);
6620 
6621 	if (sblk->stat_IfInFramesL2FilterDiscards)
6622 		BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6623 			sblk->stat_IfInFramesL2FilterDiscards);
6624 
6625 	if (sblk->stat_IfInRuleCheckerDiscards)
6626 		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6627 			sblk->stat_IfInRuleCheckerDiscards);
6628 
6629 	if (sblk->stat_IfInFTQDiscards)
6630 		BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6631 			sblk->stat_IfInFTQDiscards);
6632 
6633 	if (sblk->stat_IfInMBUFDiscards)
6634 		BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6635 			sblk->stat_IfInMBUFDiscards);
6636 
6637 	if (sblk->stat_IfInRuleCheckerP4Hit)
6638 		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6639 			sblk->stat_IfInRuleCheckerP4Hit);
6640 
6641 	if (sblk->stat_CatchupInRuleCheckerDiscards)
6642 		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6643 			sblk->stat_CatchupInRuleCheckerDiscards);
6644 
6645 	if (sblk->stat_CatchupInFTQDiscards)
6646 		BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6647 			sblk->stat_CatchupInFTQDiscards);
6648 
6649 	if (sblk->stat_CatchupInMBUFDiscards)
6650 		BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6651 			sblk->stat_CatchupInMBUFDiscards);
6652 
6653 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
6654 		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6655 			sblk->stat_CatchupInRuleCheckerP4Hit);
6656 
6657 	BCE_PRINTF(sc,
6658 		"-----------------------------"
6659 		"--------------"
6660 		"-----------------------------\n");
6661 }
6662 
6663 
6664 static void
6665 bce_dump_driver_state(struct bce_softc *sc)
6666 {
6667 	u32 val_hi, val_lo;
6668 
6669 	BCE_PRINTF(sc,
6670 		"-----------------------------"
6671 		" Driver State "
6672 		"-----------------------------\n");
6673 
6674 	val_hi = BCE_ADDR_HI(sc);
6675 	val_lo = BCE_ADDR_LO(sc);
6676 	BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6677 		val_hi, val_lo);
6678 
6679 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6680 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6681 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6682 		val_hi, val_lo);
6683 
6684 	val_hi = BCE_ADDR_HI(sc->status_block);
6685 	val_lo = BCE_ADDR_LO(sc->status_block);
6686 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6687 		val_hi, val_lo);
6688 
6689 	val_hi = BCE_ADDR_HI(sc->stats_block);
6690 	val_lo = BCE_ADDR_LO(sc->stats_block);
6691 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6692 		val_hi, val_lo);
6693 
6694 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6695 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6696 	BCE_PRINTF(sc,
6697 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
6698 		val_hi, val_lo);
6699 
6700 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6701 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6702 	BCE_PRINTF(sc,
6703 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6704 		val_hi, val_lo);
6705 
6706 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6707 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6708 	BCE_PRINTF(sc,
6709 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6710 		val_hi, val_lo);
6711 
6712 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6713 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6714 	BCE_PRINTF(sc,
6715 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6716 		val_hi, val_lo);
6717 
6718 	BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6719 		sc->interrupts_generated);
6720 
6721 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6722 		sc->rx_interrupts);
6723 
6724 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6725 		sc->tx_interrupts);
6726 
6727 	BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6728 		sc->last_status_idx);
6729 
6730 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6731 		sc->tx_prod);
6732 
6733 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6734 		sc->tx_cons);
6735 
6736 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6737 		sc->tx_prod_bseq);
6738 
6739 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6740 		sc->rx_prod);
6741 
6742 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6743 		sc->rx_cons);
6744 
6745 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6746 		sc->rx_prod_bseq);
6747 
6748 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6749 		sc->rx_mbuf_alloc);
6750 
6751 	BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6752 		sc->free_rx_bd);
6753 
6754 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6755 		sc->rx_low_watermark, (u32) USABLE_RX_BD);
6756 
6757 	BCE_PRINTF(sc, "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
6758 		sc->tx_mbuf_alloc);
6759 
6760 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6761 		sc->rx_mbuf_alloc);
6762 
6763 	BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6764 		sc->used_tx_bd);
6765 
6766 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6767 		sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6768 
6769 	BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6770 		sc->mbuf_alloc_failed);
6771 
6772 	BCE_PRINTF(sc,
6773 		"-----------------------------"
6774 		"--------------"
6775 		"-----------------------------\n");
6776 }
6777 
6778 
6779 static void
6780 bce_dump_hw_state(struct bce_softc *sc)
6781 {
6782 	u32 val1;
6783 
6784 	BCE_PRINTF(sc,
6785 		"----------------------------"
6786 		" Hardware State "
6787 		"----------------------------\n");
6788 
6789 	BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6790 
6791 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6792 	BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6793 		val1, BCE_MISC_ENABLE_STATUS_BITS);
6794 
6795 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6796 	BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6797 
6798 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6799 	BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6800 
6801 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6802 	BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6803 
6804 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6805 	BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6806 
6807 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6808 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6809 
6810 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6811 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6812 
6813 	val1 = REG_RD(sc, BCE_HC_STATUS);
6814 	BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6815 
6816 	BCE_PRINTF(sc,
6817 		"----------------------------"
6818 		"----------------"
6819 		"----------------------------\n");
6820 
6821 	BCE_PRINTF(sc,
6822 		"----------------------------"
6823 		" Register  Dump "
6824 		"----------------------------\n");
6825 
6826 	for (int i = 0x400; i < 0x8000; i += 0x10)
6827 		BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6828 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6829 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6830 
6831 	BCE_PRINTF(sc,
6832 		"----------------------------"
6833 		"----------------"
6834 		"----------------------------\n");
6835 }
6836 
6837 
6838 static void
6839 bce_breakpoint(struct bce_softc *sc)
6840 {
6841 
6842 	/* Unreachable code to shut the compiler up about unused functions. */
6843 	if (0) {
6844    		bce_dump_txbd(sc, 0, NULL);
6845 		bce_dump_rxbd(sc, 0, NULL);
6846 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6847 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6848 		bce_dump_l2fhdr(sc, 0, NULL);
6849 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6850 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6851 		bce_dump_status_block(sc);
6852 		bce_dump_stats_block(sc);
6853 		bce_dump_driver_state(sc);
6854 		bce_dump_hw_state(sc);
6855 	}
6856 
6857 	bce_dump_driver_state(sc);
6858 	/* Print the important status block fields. */
6859 	bce_dump_status_block(sc);
6860 
6861 	/* Call the debugger. */
6862 	breakpoint();
6863 
6864 	return;
6865 }
6866 #endif
6867