xref: /freebsd/sys/dev/bce/if_bce.c (revision c96ae1968a6ab7056427a739bce81bf07447c2d4)
1 /*-
2  * Copyright (c) 2006 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1
38  *
39  * The following controllers are not supported by this driver:
40  * (These are not "Production" versions of the controller.)
41  *
42  *   BCM5706C A0, A1
43  *   BCM5706S A0, A1, A2, A3
44  *   BCM5708C A0, B0
45  *   BCM5708S A0, B0, B1
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Driver Version                                                       */
55 /****************************************************************************/
/* Driver version string reported in the probe message and device description. */
char bce_driver_version[] = "v0.9.6";
57 
58 
59 /****************************************************************************/
60 /* BCE Debug Options                                                        */
61 /****************************************************************************/
#ifdef BCE_DEBUG
	/* Default debug message verbosity mask (see DBPRINT/DBRUN users). */
	u32 bce_debug = BCE_WARN;

	/*
	 * The knobs below inject simulated failures.  Each value is compared
	 * against a pseudo-random number, so the setting controls the failure
	 * probability as follows:
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 =	1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bce_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bce_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bce_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bce_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bce_debug_bootcode_running_failure = 0;
#endif
90 
91 
92 /****************************************************************************/
93 /* PCI Device ID Table                                                      */
94 /*                                                                          */
95 /* Used by bce_probe() to identify the devices supported by this driver.    */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX		64
98 
99 static struct bce_type bce_devs[] = {
100 	/* BCM5706C Controllers and OEM boards. */
101 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
102 		"HP NC370T Multifunction Gigabit Server Adapter" },
103 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
104 		"HP NC370i Multifunction Gigabit Server Adapter" },
105 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
106 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
107 
108 	/* BCM5706S controllers and OEM boards. */
109 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
110 		"HP NC370F Multifunction Gigabit Server Adapter" },
111 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
112 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 
114 	/* BCM5708C controllers and OEM boards. */
115 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
116 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
117 
118 	/* BCM5708S controllers and OEM boards. */
119 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
120 		"Broadcom NetXtreme II BCM5708S 1000Base-T" },
121 	{ 0, 0, 0, 0, NULL }
122 };
123 
124 
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data.                                       */
127 /****************************************************************************/
/*
 * NVRAM device descriptors, indexed by the flash strapping value read from
 * the controller.  Each entry carries the strapping/config/write command
 * words, buffered-vs-unbuffered flag, page geometry, address mask, total
 * size, and a human-readable name.  "Expansion" entries are placeholders
 * for strapping values with no known part.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
214 
215 
216 /****************************************************************************/
217 /* FreeBSD device entry points.                                             */
218 /****************************************************************************/
219 static int  bce_probe				(device_t);
220 static int  bce_attach				(device_t);
221 static int  bce_detach				(device_t);
222 static void bce_shutdown			(device_t);
223 
224 
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines                                   */
227 /****************************************************************************/
228 #ifdef BCE_DEBUG
229 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, int, int);
232 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain		(struct bce_softc *, int, int);
236 static void bce_dump_rx_chain		(struct bce_softc *, int, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_breakpoint			(struct bce_softc *);
242 #endif
243 
244 
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines                                      */
247 /****************************************************************************/
248 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
249 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
250 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
251 static int  bce_miibus_read_reg		(device_t, int, int);
252 static int  bce_miibus_write_reg	(device_t, int, int, int);
253 static void bce_miibus_statchg		(device_t);
254 
255 
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines                                                */
258 /****************************************************************************/
259 static int  bce_acquire_nvram_lock	(struct bce_softc *);
260 static int  bce_release_nvram_lock	(struct bce_softc *);
261 static void bce_enable_nvram_access	(struct bce_softc *);
262 static void	bce_disable_nvram_access(struct bce_softc *);
263 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
264 static int  bce_init_nvram			(struct bce_softc *);
265 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
266 static int  bce_nvram_test			(struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int  bce_enable_nvram_write	(struct bce_softc *);
269 static void bce_disable_nvram_write	(struct bce_softc *);
270 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
271 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
272 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
273 #endif
274 
275 /****************************************************************************/
276 /*                                                                          */
277 /****************************************************************************/
278 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
279 static int  bce_dma_alloc			(device_t);
280 static void bce_dma_free			(struct bce_softc *);
281 static void bce_release_resources	(struct bce_softc *);
282 
283 /****************************************************************************/
284 /* BCE Firmware Synchronization and Load                                    */
285 /****************************************************************************/
286 static int  bce_fw_sync				(struct bce_softc *, u32);
287 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
288 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
289 static void bce_init_cpus			(struct bce_softc *);
290 
291 static void bce_stop				(struct bce_softc *);
292 static int  bce_reset				(struct bce_softc *, u32);
293 static int  bce_chipinit 			(struct bce_softc *);
294 static int  bce_blockinit 			(struct bce_softc *);
295 static int  bce_get_buf				(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
296 
297 static int  bce_init_tx_chain		(struct bce_softc *);
298 static int  bce_init_rx_chain		(struct bce_softc *);
299 static void bce_free_rx_chain		(struct bce_softc *);
300 static void bce_free_tx_chain		(struct bce_softc *);
301 
302 static int  bce_tx_encap		(struct bce_softc *, struct mbuf **);
303 static void bce_start_locked		(struct ifnet *);
304 static void bce_start				(struct ifnet *);
305 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
306 static void bce_watchdog			(struct bce_softc *);
307 static int  bce_ifmedia_upd			(struct ifnet *);
308 static void bce_ifmedia_upd_locked		(struct ifnet *);
309 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
310 static void bce_init_locked			(struct bce_softc *);
311 static void bce_init				(void *);
312 static void bce_mgmt_init_locked(struct bce_softc *sc);
313 
314 static void bce_init_context		(struct bce_softc *);
315 static void bce_get_mac_addr		(struct bce_softc *);
316 static void bce_set_mac_addr		(struct bce_softc *);
317 static void bce_phy_intr			(struct bce_softc *);
318 static void bce_rx_intr				(struct bce_softc *);
319 static void bce_tx_intr				(struct bce_softc *);
320 static void bce_disable_intr		(struct bce_softc *);
321 static void bce_enable_intr			(struct bce_softc *);
322 
323 #ifdef DEVICE_POLLING
324 static void bce_poll_locked			(struct ifnet *, enum poll_cmd, int);
325 static void bce_poll				(struct ifnet *, enum poll_cmd, int);
326 #endif
327 static void bce_intr				(void *);
328 static void bce_set_rx_mode			(struct bce_softc *);
329 static void bce_stats_update		(struct bce_softc *);
330 static void bce_tick				(void *);
331 static void bce_add_sysctls			(struct bce_softc *);
332 
333 
334 /****************************************************************************/
335 /* FreeBSD device dispatch table.                                           */
336 /****************************************************************************/
/* newbus method dispatch table; terminated by the { 0, 0 } sentinel. */
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (used by the miibus child for PHY register access) */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	{ 0, 0 }
};
355 
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};
361 
static devclass_t bce_devclass;

/* This driver requires the PCI bus, Ethernet framework, and MII bus code. */
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

/* Register bce on the PCI bus, and attach the MII bus beneath bce. */
DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
370 
371 
372 /****************************************************************************/
373 /* Device probe function.                                                   */
374 /*                                                                          */
375 /* Compares the device to the driver's list of supported devices and        */
376 /* reports back to the OS whether this is the right driver for the device.  */
377 /*                                                                          */
378 /* Returns:                                                                 */
379 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
380 /****************************************************************************/
381 static int
382 bce_probe(device_t dev)
383 {
384 	struct bce_type *t;
385 	struct bce_softc *sc;
386 	char *descbuf;
387 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
388 
389 	t = bce_devs;
390 
391 	sc = device_get_softc(dev);
392 	bzero(sc, sizeof(struct bce_softc));
393 	sc->bce_unit = device_get_unit(dev);
394 	sc->bce_dev = dev;
395 
396 	/* Get the data for the device to be probed. */
397 	vid  = pci_get_vendor(dev);
398 	did  = pci_get_device(dev);
399 	svid = pci_get_subvendor(dev);
400 	sdid = pci_get_subdevice(dev);
401 
402 	DBPRINT(sc, BCE_VERBOSE_LOAD,
403 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
404 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
405 
406 	/* Look through the list of known devices for a match. */
407 	while(t->bce_name != NULL) {
408 
409 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
410 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
411 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
412 
413 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
414 
415 			if (descbuf == NULL)
416 				return(ENOMEM);
417 
418 			/* Print out the device identity. */
419 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s",
420 				t->bce_name,
421 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
422 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
423 			    bce_driver_version);
424 
425 			device_set_desc_copy(dev, descbuf);
426 			free(descbuf, M_TEMP);
427 			return(BUS_PROBE_DEFAULT);
428 		}
429 		t++;
430 	}
431 
432 	DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No IOCTL match found!\n",
433 		__FILE__, __LINE__);
434 
435 	return(ENXIO);
436 }
437 
438 
439 /****************************************************************************/
440 /* Device attach function.                                                  */
441 /*                                                                          */
442 /* Allocates device resources, performs secondary chip identification,      */
443 /* resets and initializes the hardware, and initializes driver instance     */
444 /* variables.                                                               */
445 /*                                                                          */
446 /* Returns:                                                                 */
447 /*   0 on success, positive value on failure.                               */
448 /****************************************************************************/
449 static int
450 bce_attach(device_t dev)
451 {
452 	struct bce_softc *sc;
453 	struct ifnet *ifp;
454 	u32 val;
455 	int count, mbuf, rid, rc = 0;
456 
457 	sc = device_get_softc(dev);
458 	sc->bce_dev = dev;
459 
460 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
461 
462 	mbuf = device_get_unit(dev);
463 	sc->bce_unit = mbuf;
464 
465 	pci_enable_busmaster(dev);
466 
467 	/* Allocate PCI memory resources. */
468 	rid = PCIR_BAR(0);
469 	sc->bce_res = bus_alloc_resource_any(
470 		dev,				/* dev */
471 		SYS_RES_MEMORY,			/* type */
472 		&rid,				/* rid */
473 		RF_ACTIVE | PCI_RF_DENSE);	/* flags */
474 
475 	if (sc->bce_res == NULL) {
476 		BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n",
477 			__FILE__, __LINE__);
478 		rc = ENXIO;
479 		goto bce_attach_fail;
480 	}
481 
482 	/* Get various resource handles. */
483 	sc->bce_btag    = rman_get_bustag(sc->bce_res);
484 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
485 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
486 
487 	/* Allocate PCI IRQ resources. */
488 	count = pci_msi_count(dev);
489 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
490 		rid = 1;
491 		sc->bce_flags |= BCE_USING_MSI_FLAG;
492 	} else
493 		rid = 0;
494 	sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
495 	    RF_SHAREABLE | RF_ACTIVE);
496 
497 	if (sc->bce_irq == NULL) {
498 		BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n",
499 			__FILE__, __LINE__);
500 		rc = ENXIO;
501 		goto bce_attach_fail;
502 	}
503 
504 	/* Initialize mutex for the current device instance. */
505 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
506 
507 	/*
508 	 * Configure byte swap and enable indirect register access.
509 	 * Rely on CPU to do target byte swapping on big endian systems.
510 	 * Access to registers outside of PCI configurtion space are not
511 	 * valid until this is done.
512 	 */
513 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
514 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
515 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
516 
517 	/* Save ASIC revsion info. */
518 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
519 
520 	/* Weed out any non-production controller revisions. */
521 	switch(BCE_CHIP_ID(sc)) {
522 		case BCE_CHIP_ID_5706_A0:
523 		case BCE_CHIP_ID_5706_A1:
524 		case BCE_CHIP_ID_5708_A0:
525 		case BCE_CHIP_ID_5708_B0:
526 			BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
527 				__FILE__, __LINE__,
528 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
529 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
530 			rc = ENODEV;
531 			goto bce_attach_fail;
532 	}
533 
534 	/*
535 	 * The embedded PCIe to PCI-X bridge (EPB)
536 	 * in the 5708 cannot address memory above
537 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
538 	 */
539 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
540 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
541 	else
542 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
543 
544 	/*
545 	 * Find the base address for shared memory access.
546 	 * Newer versions of bootcode use a signature and offset
547 	 * while older versions use a fixed address.
548 	 */
549 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
550 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
551 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
552 	else
553 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
554 
555 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
556 
557 	/* Set initial device and PHY flags */
558 	sc->bce_flags = 0;
559 	sc->bce_phy_flags = 0;
560 
561 	/* Get PCI bus information (speed and type). */
562 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
563 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
564 		u32 clkreg;
565 
566 		sc->bce_flags |= BCE_PCIX_FLAG;
567 
568 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
569 
570 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
571 		switch (clkreg) {
572 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
573 			sc->bus_speed_mhz = 133;
574 			break;
575 
576 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
577 			sc->bus_speed_mhz = 100;
578 			break;
579 
580 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
581 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
582 			sc->bus_speed_mhz = 66;
583 			break;
584 
585 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
586 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
587 			sc->bus_speed_mhz = 50;
588 			break;
589 
590 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
591 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
592 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
593 			sc->bus_speed_mhz = 33;
594 			break;
595 		}
596 	} else {
597 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
598 			sc->bus_speed_mhz = 66;
599 		else
600 			sc->bus_speed_mhz = 33;
601 	}
602 
603 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
604 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
605 
606 	BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
607 		sc->bce_chipid,
608 		((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
609 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
610 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
611 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
612 		sc->bus_speed_mhz);
613 
614 	/* Reset the controller. */
615 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
616 		rc = ENXIO;
617 		goto bce_attach_fail;
618 	}
619 
620 	/* Initialize the controller. */
621 	if (bce_chipinit(sc)) {
622 		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
623 			__FILE__, __LINE__);
624 		rc = ENXIO;
625 		goto bce_attach_fail;
626 	}
627 
628 	/* Perform NVRAM test. */
629 	if (bce_nvram_test(sc)) {
630 		BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
631 			__FILE__, __LINE__);
632 		rc = ENXIO;
633 		goto bce_attach_fail;
634 	}
635 
636 	/* Fetch the permanent Ethernet MAC address. */
637 	bce_get_mac_addr(sc);
638 
639 	/*
640 	 * Trip points control how many BDs
641 	 * should be ready before generating an
642 	 * interrupt while ticks control how long
643 	 * a BD can sit in the chain before
644 	 * generating an interrupt.  Set the default
645 	 * values for the RX and TX rings.
646 	 */
647 
648 #ifdef BCE_DRBUG
649 	/* Force more frequent interrupts. */
650 	sc->bce_tx_quick_cons_trip_int = 1;
651 	sc->bce_tx_quick_cons_trip     = 1;
652 	sc->bce_tx_ticks_int           = 0;
653 	sc->bce_tx_ticks               = 0;
654 
655 	sc->bce_rx_quick_cons_trip_int = 1;
656 	sc->bce_rx_quick_cons_trip     = 1;
657 	sc->bce_rx_ticks_int           = 0;
658 	sc->bce_rx_ticks               = 0;
659 #else
660 	sc->bce_tx_quick_cons_trip_int = 20;
661 	sc->bce_tx_quick_cons_trip     = 20;
662 	sc->bce_tx_ticks_int           = 80;
663 	sc->bce_tx_ticks               = 80;
664 
665 	sc->bce_rx_quick_cons_trip_int = 6;
666 	sc->bce_rx_quick_cons_trip     = 6;
667 	sc->bce_rx_ticks_int           = 18;
668 	sc->bce_rx_ticks               = 18;
669 #endif
670 
671 	/* Update statistics once every second. */
672 	sc->bce_stats_ticks = 1000000 & 0xffff00;
673 
674 	/*
675 	 * The copper based NetXtreme II controllers
676 	 * use an integrated PHY at address 1 while
677 	 * the SerDes controllers use a PHY at
678 	 * address 2.
679 	 */
680 	sc->bce_phy_addr = 1;
681 
682 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
683 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
684 		sc->bce_flags |= BCE_NO_WOL_FLAG;
685 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
686 			sc->bce_phy_addr = 2;
687 			val = REG_RD_IND(sc, sc->bce_shmem_base +
688 					 BCE_SHARED_HW_CFG_CONFIG);
689 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
690 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
691 		}
692 	}
693 
694 	/* Allocate DMA memory resources. */
695 	if (bce_dma_alloc(dev)) {
696 		BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
697 		    __FILE__, __LINE__);
698 		rc = ENXIO;
699 		goto bce_attach_fail;
700 	}
701 
702 	/* Allocate an ifnet structure. */
703 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
704 	if (ifp == NULL) {
705 		BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n",
706 			__FILE__, __LINE__);
707 		rc = ENXIO;
708 		goto bce_attach_fail;
709 	}
710 
711 	/* Initialize the ifnet interface. */
712 	ifp->if_softc        = sc;
713 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
714 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
715 	ifp->if_ioctl        = bce_ioctl;
716 	ifp->if_start        = bce_start;
717 	ifp->if_init         = bce_init;
718 	ifp->if_mtu          = ETHERMTU;
719 	ifp->if_hwassist     = BCE_IF_HWASSIST;
720 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
721 	ifp->if_capenable    = ifp->if_capabilities;
722 
723 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
724 	sc->mbuf_alloc_size  = MCLBYTES;
725 #ifdef DEVICE_POLLING
726 	ifp->if_capabilities |= IFCAP_POLLING;
727 #endif
728 
729 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
730 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
731 		ifp->if_baudrate = IF_Gbps(2.5);
732 	else
733 		ifp->if_baudrate = IF_Gbps(1);
734 
735 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
736 	IFQ_SET_READY(&ifp->if_snd);
737 
738 	/* Look for our PHY. */
739 	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
740 		bce_ifmedia_sts)) {
741 		BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n",
742 			__FILE__, __LINE__);
743 		rc = ENXIO;
744 		goto bce_attach_fail;
745 	}
746 
747 	/* Attach to the Ethernet interface list. */
748 	ether_ifattach(ifp, sc->eaddr);
749 
750 #if __FreeBSD_version < 500000
751 	callout_init(&sc->bce_stat_ch);
752 #else
753 	callout_init_mtx(&sc->bce_stat_ch, &sc->bce_mtx, 0);
754 #endif
755 
756 	/* Hookup IRQ last. */
757 	rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
758 	   bce_intr, sc, &sc->bce_intrhand);
759 
760 	if (rc) {
761 		BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n",
762 			__FILE__, __LINE__);
763 		bce_detach(dev);
764 		goto bce_attach_exit;
765 	}
766 
767 	/* Print some important debugging info. */
768 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
769 
770 	/* Add the supported sysctls to the kernel. */
771 	bce_add_sysctls(sc);
772 
773 	/* Get the firmware running so IPMI still works */
774 	BCE_LOCK(sc);
775 	bce_mgmt_init_locked(sc);
776 	BCE_UNLOCK(sc);
777 
778 	goto bce_attach_exit;
779 
780 bce_attach_fail:
781 	bce_release_resources(sc);
782 
783 bce_attach_exit:
784 
785 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
786 
787 	return(rc);
788 }
789 
790 
791 /****************************************************************************/
792 /* Device detach function.                                                  */
793 /*                                                                          */
794 /* Stops the controller, resets the controller, and releases resources.     */
795 /*                                                                          */
796 /* Returns:                                                                 */
797 /*   0 on success, positive value on failure.                               */
798 /****************************************************************************/
799 static int
800 bce_detach(device_t dev)
801 {
802 	struct bce_softc *sc;
803 	struct ifnet *ifp;
804 
805 	sc = device_get_softc(dev);
806 
807 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
808 
809 	ifp = sc->bce_ifp;
810 
811 #ifdef DEVICE_POLLING
812 	if (ifp->if_capenable & IFCAP_POLLING)
813 		ether_poll_deregister(ifp);
814 #endif
815 
816 	/* Stop and reset the controller. */
817 	BCE_LOCK(sc);
818 	bce_stop(sc);
819 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
820 	BCE_UNLOCK(sc);
821 
822 	ether_ifdetach(ifp);
823 
824 	/* If we have a child device on the MII bus remove it too. */
825 	bus_generic_detach(dev);
826 	device_delete_child(dev, sc->bce_miibus);
827 
828 	/* Release all remaining resources. */
829 	bce_release_resources(sc);
830 
831 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
832 
833 	return(0);
834 }
835 
836 
837 /****************************************************************************/
838 /* Device shutdown function.                                                */
839 /*                                                                          */
840 /* Stops and resets the controller.                                         */
841 /*                                                                          */
842 /* Returns:                                                                 */
843 /*   Nothing                                                                */
844 /****************************************************************************/
845 static void
846 bce_shutdown(device_t dev)
847 {
848 	struct bce_softc *sc = device_get_softc(dev);
849 
850 	BCE_LOCK(sc);
851 	bce_stop(sc);
852 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
853 	BCE_UNLOCK(sc);
854 }
855 
856 
857 /****************************************************************************/
858 /* Indirect register read.                                                  */
859 /*                                                                          */
860 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
861 /* configuration space.  Using this mechanism avoids issues with posted     */
862 /* reads but is much slower than memory-mapped I/O.                         */
863 /*                                                                          */
864 /* Returns:                                                                 */
865 /*   The value of the register.                                             */
866 /****************************************************************************/
867 static u32
868 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
869 {
870 	device_t dev;
871 	dev = sc->bce_dev;
872 
873 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
874 #ifdef BCE_DEBUG
875 	{
876 		u32 val;
877 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
878 		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
879 			__FUNCTION__, offset, val);
880 		return val;
881 	}
882 #else
883 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
884 #endif
885 }
886 
887 
888 /****************************************************************************/
889 /* Indirect register write.                                                 */
890 /*                                                                          */
891 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
892 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
894 /*                                                                          */
895 /* Returns:                                                                 */
896 /*   Nothing.                                                               */
897 /****************************************************************************/
898 static void
899 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
900 {
901 	device_t dev;
902 	dev = sc->bce_dev;
903 
904 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
905 		__FUNCTION__, offset, val);
906 
907 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
908 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
909 }
910 
911 
912 /****************************************************************************/
913 /* Context memory write.                                                    */
914 /*                                                                          */
915 /* The NetXtreme II controller uses context memory to track connection      */
916 /* information for L2 and higher network protocols.                         */
917 /*                                                                          */
918 /* Returns:                                                                 */
919 /*   Nothing.                                                               */
920 /****************************************************************************/
921 static void
922 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
923 {
924 
925 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
926 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
927 
928 	offset += cid_addr;
929 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
930 	REG_WR(sc, BCE_CTX_DATA, val);
931 }
932 
933 
934 /****************************************************************************/
935 /* PHY register read.                                                       */
936 /*                                                                          */
937 /* Implements register reads on the MII bus.                                */
938 /*                                                                          */
939 /* Returns:                                                                 */
940 /*   The value of the register.                                             */
941 /****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/*
	 * If hardware auto-polling of the PHY is enabled, turn it off
	 * temporarily so the MDIO COMM register can be driven by software.
	 * NOTE(review): the read-back of MDIO_MODE presumably flushes the
	 * posted write before the settle delay — confirm against hardware docs.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO read command for this PHY/register. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until START_BUSY clears, indicating the read completed. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			/* Keep only the 16-bit data field of the result. */
			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* On timeout, log the error and return 0 as the register value. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore auto-polling if it was disabled above. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);

}
1010 
1011 
1012 /****************************************************************************/
1013 /* PHY register write.                                                      */
1014 /*                                                                          */
1015 /* Implements register writes on the MII bus.                               */
1016 /*                                                                          */
1017 /* Returns:                                                                 */
/*   Always 0; a write timeout is only reported via BCE_PRINTF.             */
1019 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/*
	 * Temporarily disable hardware auto-polling of the PHY so the
	 * MDIO COMM register can be driven by software (re-enabled below).
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO write command with the 16-bit data. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until START_BUSY clears, indicating the write completed. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is only reported; the function still returns 0. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore auto-polling if it was disabled above. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return 0;
}
1079 
1080 
1081 /****************************************************************************/
1082 /* MII bus status change.                                                   */
1083 /*                                                                          */
1084 /* Called by the MII bus driver when the PHY establishes link to set the    */
1085 /* MAC interface registers.                                                 */
1086 /*                                                                          */
1087 /* Returns:                                                                 */
1088 /*   Nothing.                                                               */
1089 /****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Clear the current port mode bits before setting the new mode. */
	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
1121 
1122 
1123 /****************************************************************************/
1124 /* Acquire NVRAM lock.                                                      */
1125 /*                                                                          */
1126 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
1129 /*                                                                          */
1130 /* Returns:                                                                 */
1131 /*   0 on success, positive value on failure.                               */
1132 /****************************************************************************/
1133 static int
1134 bce_acquire_nvram_lock(struct bce_softc *sc)
1135 {
1136 	u32 val;
1137 	int j;
1138 
1139 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1140 
1141 	/* Request access to the flash interface. */
1142 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1143 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1144 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1145 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1146 			break;
1147 
1148 		DELAY(5);
1149 	}
1150 
1151 	if (j >= NVRAM_TIMEOUT_COUNT) {
1152 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1153 		return EBUSY;
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 
1160 /****************************************************************************/
1161 /* Release NVRAM lock.                                                      */
1162 /*                                                                          */
1163 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
1166 /*                                                                          */
1167 /* Returns:                                                                 */
1168 /*   0 on success, positive value on failure.                               */
1169 /****************************************************************************/
1170 static int
1171 bce_release_nvram_lock(struct bce_softc *sc)
1172 {
1173 	int j;
1174 	u32 val;
1175 
1176 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1177 
1178 	/*
1179 	 * Relinquish nvram interface.
1180 	 */
1181 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1182 
1183 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1184 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1185 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1186 			break;
1187 
1188 		DELAY(5);
1189 	}
1190 
1191 	if (j >= NVRAM_TIMEOUT_COUNT) {
1192 		DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1193 		return EBUSY;
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 
1200 #ifdef BCE_NVRAM_WRITE_SUPPORT
1201 /****************************************************************************/
1202 /* Enable NVRAM write access.                                               */
1203 /*                                                                          */
1204 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1205 /*                                                                          */
1206 /* Returns:                                                                 */
1207 /*   0 on success, positive value on failure.                               */
1208 /****************************************************************************/
1209 static int
1210 bce_enable_nvram_write(struct bce_softc *sc)
1211 {
1212 	u32 val;
1213 
1214 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1215 
1216 	val = REG_RD(sc, BCE_MISC_CFG);
1217 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1218 
1219 	if (!sc->bce_flash_info->buffered) {
1220 		int j;
1221 
1222 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1223 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1224 
1225 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1226 			DELAY(5);
1227 
1228 			val = REG_RD(sc, BCE_NVM_COMMAND);
1229 			if (val & BCE_NVM_COMMAND_DONE)
1230 				break;
1231 		}
1232 
1233 		if (j >= NVRAM_TIMEOUT_COUNT) {
1234 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1235 			return EBUSY;
1236 		}
1237 	}
1238 	return 0;
1239 }
1240 
1241 
1242 /****************************************************************************/
1243 /* Disable NVRAM write access.                                              */
1244 /*                                                                          */
1245 /* When the caller is finished writing to NVRAM write access must be        */
1246 /* disabled.                                                                */
1247 /*                                                                          */
1248 /* Returns:                                                                 */
1249 /*   Nothing.                                                               */
1250 /****************************************************************************/
1251 static void
1252 bce_disable_nvram_write(struct bce_softc *sc)
1253 {
1254 	u32 val;
1255 
1256 	DBPRINT(sc, BCE_VERBOSE,  "Disabling NVRAM write.\n");
1257 
1258 	val = REG_RD(sc, BCE_MISC_CFG);
1259 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1260 }
1261 #endif
1262 
1263 
1264 /****************************************************************************/
1265 /* Enable NVRAM access.                                                     */
1266 /*                                                                          */
1267 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1269 /*                                                                          */
1270 /* Returns:                                                                 */
1271 /*   Nothing.                                                               */
1272 /****************************************************************************/
1273 static void
1274 bce_enable_nvram_access(struct bce_softc *sc)
1275 {
1276 	u32 val;
1277 
1278 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1279 
1280 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1281 	/* Enable both bits, even on read. */
1282 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1283 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1284 }
1285 
1286 
1287 /****************************************************************************/
1288 /* Disable NVRAM access.                                                    */
1289 /*                                                                          */
1290 /* When the caller is finished accessing NVRAM access must be disabled.     */
1291 /*                                                                          */
1292 /* Returns:                                                                 */
1293 /*   Nothing.                                                               */
1294 /****************************************************************************/
1295 static void
1296 bce_disable_nvram_access(struct bce_softc *sc)
1297 {
1298 	u32 val;
1299 
1300 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1301 
1302 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1303 
1304 	/* Disable both bits, even after read. */
1305 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1306 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1307 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1308 }
1309 
1310 
1311 #ifdef BCE_NVRAM_WRITE_SUPPORT
1312 /****************************************************************************/
1313 /* Erase NVRAM page before writing.                                         */
1314 /*                                                                          */
1315 /* Non-buffered flash parts require that a page be erased before it is      */
1316 /* written.                                                                 */
1317 /*                                                                          */
1318 /* Returns:                                                                 */
1319 /*   0 on success, positive value on failure.                               */
1320 /****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	      BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	/* DONE never asserted within the timeout window. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
1363 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1364 
1365 
1366 /****************************************************************************/
1367 /* Read a dword (32 bits) from NVRAM.                                       */
1368 /*                                                                          */
1369 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1370 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1371 /*                                                                          */
1372 /* Returns:                                                                 */
1373 /*   0 on success and the 32 bit value read, positive value on failure.     */
1374 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
							u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/*
	 * Calculate the offset for buffered flash: the page number goes in
	 * the high bits (shifted by page_bits) with the byte offset within
	 * the page in the low bits.
	 */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			   sc->bce_flash_info->page_bits) +
			  (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion, then copy out the (byte-swapped) result. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big-endian; convert to host order. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1425 
1426 
1427 #ifdef BCE_NVRAM_WRITE_SUPPORT
1428 /****************************************************************************/
1429 /* Write a dword (32 bits) to NVRAM.                                        */
1430 /*                                                                          */
1431 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1432 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1433 /* enabled NVRAM write access.                                              */
1434 /*                                                                          */
1435 /* Returns:                                                                 */
1436 /*   0 on success, positive value on failure.                               */
1437 /****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
	u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/*
	 * Calculate the offset for buffered flash: page number in the high
	 * bits (shifted by page_bits), byte offset within the page below.
	 */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	/* DONE never asserted within the timeout window. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
1481 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1482 
1483 
1484 /****************************************************************************/
1485 /* Initialize NVRAM access.                                                 */
1486 /*                                                                          */
1487 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1488 /* access that device.                                                      */
1489 /*                                                                          */
1490 /* Returns:                                                                 */
1491 /*   0 on success, positive value on failure.                               */
1492 /****************************************************************************/
1493 static int
1494 bce_init_nvram(struct bce_softc *sc)
1495 {
1496 	u32 val;
1497 	int j, entry_count, rc;
1498 	struct flash_spec *flash;
1499 
1500 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1501 
1502 	/* Determine the selected interface. */
1503 	val = REG_RD(sc, BCE_NVM_CFG1);
1504 
1505 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1506 
1507 	rc = 0;
1508 
1509 	/*
1510 	 * Flash reconfiguration is required to support additional
1511 	 * NVRAM devices not directly supported in hardware.
1512 	 * Check if the flash interface was reconfigured
1513 	 * by the bootcode.
1514 	 */
1515 
1516 	if (val & 0x40000000) {
1517 		/* Flash interface reconfigured by bootcode. */
1518 
1519 		DBPRINT(sc,BCE_INFO_LOAD,
1520 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1521 
1522 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1523 		     j++, flash++) {
1524 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1525 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1526 				sc->bce_flash_info = flash;
1527 				break;
1528 			}
1529 		}
1530 	} else {
1531 		/* Flash interface not yet reconfigured. */
1532 		u32 mask;
1533 
1534 		DBPRINT(sc,BCE_INFO_LOAD,
1535 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1536 
1537 		if (val & (1 << 23))
1538 			mask = FLASH_BACKUP_STRAP_MASK;
1539 		else
1540 			mask = FLASH_STRAP_MASK;
1541 
1542 		/* Look for the matching NVRAM device configuration data. */
1543 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1544 
1545 			/* Check if the device matches any of the known devices. */
1546 			if ((val & mask) == (flash->strapping & mask)) {
1547 				/* Found a device match. */
1548 				sc->bce_flash_info = flash;
1549 
1550 				/* Request access to the flash interface. */
1551 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1552 					return rc;
1553 
1554 				/* Reconfigure the flash interface. */
1555 				bce_enable_nvram_access(sc);
1556 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1557 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1558 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1559 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1560 				bce_disable_nvram_access(sc);
1561 				bce_release_nvram_lock(sc);
1562 
1563 				break;
1564 			}
1565 		}
1566 	}
1567 
1568 	/* Check if a matching device was found. */
1569 	if (j == entry_count) {
1570 		sc->bce_flash_info = NULL;
1571 		BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1572 			__FILE__, __LINE__);
1573 		rc = ENODEV;
1574 	}
1575 
1576 	/* Write the flash config data to the shared memory interface. */
1577 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1578 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1579 	if (val)
1580 		sc->bce_flash_size = val;
1581 	else
1582 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1583 
1584 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1585 		sc->bce_flash_info->total_size);
1586 
1587 	DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1588 
1589 	return rc;
1590 }
1591 
1592 
1593 /****************************************************************************/
1594 /* Read an arbitrary range of data from NVRAM.                              */
1595 /*                                                                          */
1596 /* Prepares the NVRAM interface for access and reads the requested data     */
1597 /* into the supplied buffer.                                                */
1598 /*                                                                          */
1599 /* Returns:                                                                 */
1600 /*   0 on success and the data read, positive value on failure.             */
1601 /****************************************************************************/
1602 static int
1603 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1604 	int buf_size)
1605 {
1606 	int rc = 0;
1607 	u32 cmd_flags, offset32, len32, extra;
1608 
1609 	if (buf_size == 0)
1610 		return 0;
1611 
1612 	/* Request access to the flash interface. */
1613 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1614 		return rc;
1615 
1616 	/* Enable access to flash interface */
1617 	bce_enable_nvram_access(sc);
1618 
1619 	len32 = buf_size;
1620 	offset32 = offset;
1621 	extra = 0;
1622 
1623 	cmd_flags = 0;
1624 
1625 	if (offset32 & 3) {
1626 		u8 buf[4];
1627 		u32 pre_len;
1628 
1629 		offset32 &= ~3;
1630 		pre_len = 4 - (offset & 3);
1631 
1632 		if (pre_len >= len32) {
1633 			pre_len = len32;
1634 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1635 		}
1636 		else {
1637 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1638 		}
1639 
1640 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1641 
1642 		if (rc)
1643 			return rc;
1644 
1645 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1646 
1647 		offset32 += 4;
1648 		ret_buf += pre_len;
1649 		len32 -= pre_len;
1650 	}
1651 
1652 	if (len32 & 3) {
1653 		extra = 4 - (len32 & 3);
1654 		len32 = (len32 + 4) & ~3;
1655 	}
1656 
1657 	if (len32 == 4) {
1658 		u8 buf[4];
1659 
1660 		if (cmd_flags)
1661 			cmd_flags = BCE_NVM_COMMAND_LAST;
1662 		else
1663 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1664 				    BCE_NVM_COMMAND_LAST;
1665 
1666 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1667 
1668 		memcpy(ret_buf, buf, 4 - extra);
1669 	}
1670 	else if (len32 > 0) {
1671 		u8 buf[4];
1672 
1673 		/* Read the first word. */
1674 		if (cmd_flags)
1675 			cmd_flags = 0;
1676 		else
1677 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1678 
1679 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1680 
1681 		/* Advance to the next dword. */
1682 		offset32 += 4;
1683 		ret_buf += 4;
1684 		len32 -= 4;
1685 
1686 		while (len32 > 4 && rc == 0) {
1687 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1688 
1689 			/* Advance to the next dword. */
1690 			offset32 += 4;
1691 			ret_buf += 4;
1692 			len32 -= 4;
1693 		}
1694 
1695 		if (rc)
1696 			return rc;
1697 
1698 		cmd_flags = BCE_NVM_COMMAND_LAST;
1699 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1700 
1701 		memcpy(ret_buf, buf, 4 - extra);
1702 	}
1703 
1704 	/* Disable access to flash interface and release the lock. */
1705 	bce_disable_nvram_access(sc);
1706 	bce_release_nvram_lock(sc);
1707 
1708 	return rc;
1709 }
1710 
1711 
1712 #ifdef BCE_NVRAM_WRITE_SUPPORT
1713 /****************************************************************************/
1714 /* Write an arbitrary range of data to NVRAM.                               */
1715 /*                                                                          */
1716 /* Prepares the NVRAM interface for write access and writes the requested   */
1717 /* data from the supplied buffer.  The caller is responsible for            */
1718 /* calculating any appropriate CRCs.                                        */
1719 /*                                                                          */
1720 /* Returns:                                                                 */
1721 /*   0 on success, positive value on failure.                               */
1722 /****************************************************************************/
1723 static int
1724 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1725 	int buf_size)
1726 {
1727 	u32 written, offset32, len32;
1728 	u8 *buf, start[4], end[4];
1729 	int rc = 0;
1730 	int align_start, align_end;
1731 
1732 	buf = data_buf;
1733 	offset32 = offset;
1734 	len32 = buf_size;
1735 	align_start = align_end = 0;
1736 
1737 	if ((align_start = (offset32 & 3))) {
1738 		offset32 &= ~3;
1739 		len32 += align_start;
1740 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1741 			return rc;
1742 	}
1743 
1744 	if (len32 & 3) {
1745 	       	if ((len32 > 4) || !align_start) {
1746 			align_end = 4 - (len32 & 3);
1747 			len32 += align_end;
1748 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1749 				end, 4))) {
1750 				return rc;
1751 			}
1752 		}
1753 	}
1754 
1755 	if (align_start || align_end) {
1756 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1757 		if (buf == 0)
1758 			return ENOMEM;
1759 		if (align_start) {
1760 			memcpy(buf, start, 4);
1761 		}
1762 		if (align_end) {
1763 			memcpy(buf + len32 - 4, end, 4);
1764 		}
1765 		memcpy(buf + align_start, data_buf, buf_size);
1766 	}
1767 
1768 	written = 0;
1769 	while ((written < len32) && (rc == 0)) {
1770 		u32 page_start, page_end, data_start, data_end;
1771 		u32 addr, cmd_flags;
1772 		int i;
1773 		u8 flash_buffer[264];
1774 
1775 	    /* Find the page_start addr */
1776 		page_start = offset32 + written;
1777 		page_start -= (page_start % sc->bce_flash_info->page_size);
1778 		/* Find the page_end addr */
1779 		page_end = page_start + sc->bce_flash_info->page_size;
1780 		/* Find the data_start addr */
1781 		data_start = (written == 0) ? offset32 : page_start;
1782 		/* Find the data_end addr */
1783 		data_end = (page_end > offset32 + len32) ?
1784 			(offset32 + len32) : page_end;
1785 
1786 		/* Request access to the flash interface. */
1787 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1788 			goto nvram_write_end;
1789 
1790 		/* Enable access to flash interface */
1791 		bce_enable_nvram_access(sc);
1792 
1793 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1794 		if (sc->bce_flash_info->buffered == 0) {
1795 			int j;
1796 
1797 			/* Read the whole page into the buffer
1798 			 * (non-buffer flash only) */
1799 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1800 				if (j == (sc->bce_flash_info->page_size - 4)) {
1801 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1802 				}
1803 				rc = bce_nvram_read_dword(sc,
1804 					page_start + j,
1805 					&flash_buffer[j],
1806 					cmd_flags);
1807 
1808 				if (rc)
1809 					goto nvram_write_end;
1810 
1811 				cmd_flags = 0;
1812 			}
1813 		}
1814 
1815 		/* Enable writes to flash interface (unlock write-protect) */
1816 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1817 			goto nvram_write_end;
1818 
1819 		/* Erase the page */
1820 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1821 			goto nvram_write_end;
1822 
1823 		/* Re-enable the write again for the actual write */
1824 		bce_enable_nvram_write(sc);
1825 
1826 		/* Loop to write back the buffer data from page_start to
1827 		 * data_start */
1828 		i = 0;
1829 		if (sc->bce_flash_info->buffered == 0) {
1830 			for (addr = page_start; addr < data_start;
1831 				addr += 4, i += 4) {
1832 
1833 				rc = bce_nvram_write_dword(sc, addr,
1834 					&flash_buffer[i], cmd_flags);
1835 
1836 				if (rc != 0)
1837 					goto nvram_write_end;
1838 
1839 				cmd_flags = 0;
1840 			}
1841 		}
1842 
1843 		/* Loop to write the new data from data_start to data_end */
1844 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1845 			if ((addr == page_end - 4) ||
1846 				((sc->bce_flash_info->buffered) &&
1847 				 (addr == data_end - 4))) {
1848 
1849 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1850 			}
1851 			rc = bce_nvram_write_dword(sc, addr, buf,
1852 				cmd_flags);
1853 
1854 			if (rc != 0)
1855 				goto nvram_write_end;
1856 
1857 			cmd_flags = 0;
1858 			buf += 4;
1859 		}
1860 
1861 		/* Loop to write back the buffer data from data_end
1862 		 * to page_end */
1863 		if (sc->bce_flash_info->buffered == 0) {
1864 			for (addr = data_end; addr < page_end;
1865 				addr += 4, i += 4) {
1866 
1867 				if (addr == page_end-4) {
1868 					cmd_flags = BCE_NVM_COMMAND_LAST;
1869                 		}
1870 				rc = bce_nvram_write_dword(sc, addr,
1871 					&flash_buffer[i], cmd_flags);
1872 
1873 				if (rc != 0)
1874 					goto nvram_write_end;
1875 
1876 				cmd_flags = 0;
1877 			}
1878 		}
1879 
1880 		/* Disable writes to flash interface (lock write-protect) */
1881 		bce_disable_nvram_write(sc);
1882 
1883 		/* Disable access to flash interface */
1884 		bce_disable_nvram_access(sc);
1885 		bce_release_nvram_lock(sc);
1886 
1887 		/* Increment written */
1888 		written += data_end - data_start;
1889 	}
1890 
1891 nvram_write_end:
1892 	if (align_start || align_end)
1893 		free(buf, M_DEVBUF);
1894 
1895 	return rc;
1896 }
1897 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1898 
1899 
1900 /****************************************************************************/
1901 /* Verifies that NVRAM is accessible and contains valid data.               */
1902 /*                                                                          */
1903 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1904 /* correct.                                                                 */
1905 /*                                                                          */
1906 /* Returns:                                                                 */
1907 /*   0 on success, positive value on failure.                               */
1908 /****************************************************************************/
1909 static int
1910 bce_nvram_test(struct bce_softc *sc)
1911 {
1912 	u32 buf[BCE_NVRAM_SIZE / 4];
1913 	u8 *data = (u8 *) buf;
1914 	int rc = 0;
1915 	u32 magic, csum;
1916 
1917 
1918 	/*
1919 	 * Check that the device NVRAM is valid by reading
1920 	 * the magic value at offset 0.
1921 	 */
1922 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1923 		goto bce_nvram_test_done;
1924 
1925 
1926     magic = bce_be32toh(buf[0]);
1927 	if (magic != BCE_NVRAM_MAGIC) {
1928 		rc = ENODEV;
1929 		BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1930 			"Found: 0x%08X\n",
1931 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1932 		goto bce_nvram_test_done;
1933 	}
1934 
1935 	/*
1936 	 * Verify that the device NVRAM includes valid
1937 	 * configuration data.
1938 	 */
1939 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1940 		goto bce_nvram_test_done;
1941 
1942 	csum = ether_crc32_le(data, 0x100);
1943 	if (csum != BCE_CRC32_RESIDUAL) {
1944 		rc = ENODEV;
1945 		BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1946 			"Expected: 0x%08X, Found: 0x%08X\n",
1947 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1948 		goto bce_nvram_test_done;
1949 	}
1950 
1951 	csum = ether_crc32_le(data + 0x100, 0x100);
1952 	if (csum != BCE_CRC32_RESIDUAL) {
1953 		BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1954 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1955 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1956 		rc = ENODEV;
1957 	}
1958 
1959 bce_nvram_test_done:
1960 	return rc;
1961 }
1962 
1963 
1964 /****************************************************************************/
1965 /* Free any DMA memory owned by the driver.                                 */
1966 /*                                                                          */
1967 /* Scans through each data structure that requires DMA memory and frees     */
1968 /* the memory if allocated.                                                 */
1969 /*                                                                          */
1970 /* Returns:                                                                 */
1971 /*   Nothing.                                                               */
1972 /****************************************************************************/
1973 static void
1974 bce_dma_free(struct bce_softc *sc)
1975 {
1976 	int i;
1977 
1978 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1979 
1980 	/* Destroy the status block. */
1981 	if (sc->status_block != NULL)
1982 		bus_dmamem_free(
1983 			sc->status_tag,
1984 		    sc->status_block,
1985 		    sc->status_map);
1986 
1987 	if (sc->status_map != NULL) {
1988 		bus_dmamap_unload(
1989 			sc->status_tag,
1990 		    sc->status_map);
1991 		bus_dmamap_destroy(sc->status_tag,
1992 		    sc->status_map);
1993 	}
1994 
1995 	if (sc->status_tag != NULL)
1996 		bus_dma_tag_destroy(sc->status_tag);
1997 
1998 
1999 	/* Destroy the statistics block. */
2000 	if (sc->stats_block != NULL)
2001 		bus_dmamem_free(
2002 			sc->stats_tag,
2003 		    sc->stats_block,
2004 		    sc->stats_map);
2005 
2006 	if (sc->stats_map != NULL) {
2007 		bus_dmamap_unload(
2008 			sc->stats_tag,
2009 		    sc->stats_map);
2010 		bus_dmamap_destroy(sc->stats_tag,
2011 		    sc->stats_map);
2012 	}
2013 
2014 	if (sc->stats_tag != NULL)
2015 		bus_dma_tag_destroy(sc->stats_tag);
2016 
2017 
2018 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2019 	for (i = 0; i < TX_PAGES; i++ ) {
2020 		if (sc->tx_bd_chain[i] != NULL)
2021 			bus_dmamem_free(
2022 				sc->tx_bd_chain_tag,
2023 			    sc->tx_bd_chain[i],
2024 			    sc->tx_bd_chain_map[i]);
2025 
2026 		if (sc->tx_bd_chain_map[i] != NULL) {
2027 			bus_dmamap_unload(
2028 				sc->tx_bd_chain_tag,
2029 		    	sc->tx_bd_chain_map[i]);
2030 			bus_dmamap_destroy(
2031 				sc->tx_bd_chain_tag,
2032 			    sc->tx_bd_chain_map[i]);
2033 		}
2034 
2035 	}
2036 
2037 	/* Destroy the TX buffer descriptor tag. */
2038 	if (sc->tx_bd_chain_tag != NULL)
2039 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2040 
2041 
2042 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2043 	for (i = 0; i < RX_PAGES; i++ ) {
2044 		if (sc->rx_bd_chain[i] != NULL)
2045 			bus_dmamem_free(
2046 				sc->rx_bd_chain_tag,
2047 			    sc->rx_bd_chain[i],
2048 			    sc->rx_bd_chain_map[i]);
2049 
2050 		if (sc->rx_bd_chain_map[i] != NULL) {
2051 			bus_dmamap_unload(
2052 				sc->rx_bd_chain_tag,
2053 		    	sc->rx_bd_chain_map[i]);
2054 			bus_dmamap_destroy(
2055 				sc->rx_bd_chain_tag,
2056 			    sc->rx_bd_chain_map[i]);
2057 		}
2058 	}
2059 
2060 	/* Destroy the RX buffer descriptor tag. */
2061 	if (sc->rx_bd_chain_tag != NULL)
2062 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2063 
2064 
2065 	/* Unload and destroy the TX mbuf maps. */
2066 	for (i = 0; i < TOTAL_TX_BD; i++) {
2067 		if (sc->tx_mbuf_map[i] != NULL) {
2068 			bus_dmamap_unload(sc->tx_mbuf_tag,
2069 				sc->tx_mbuf_map[i]);
2070 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2071 	 			sc->tx_mbuf_map[i]);
2072 		}
2073 	}
2074 
2075 	/* Destroy the TX mbuf tag. */
2076 	if (sc->tx_mbuf_tag != NULL)
2077 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2078 
2079 
2080 	/* Unload and destroy the RX mbuf maps. */
2081 	for (i = 0; i < TOTAL_RX_BD; i++) {
2082 		if (sc->rx_mbuf_map[i] != NULL) {
2083 			bus_dmamap_unload(sc->rx_mbuf_tag,
2084 				sc->rx_mbuf_map[i]);
2085 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2086 	 			sc->rx_mbuf_map[i]);
2087 		}
2088 	}
2089 
2090 	/* Destroy the RX mbuf tag. */
2091 	if (sc->rx_mbuf_tag != NULL)
2092 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2093 
2094 
2095 	/* Destroy the parent tag */
2096 	if (sc->parent_tag != NULL)
2097 		bus_dma_tag_destroy(sc->parent_tag);
2098 
2099 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2100 
2101 }
2102 
2103 
2104 /****************************************************************************/
2105 /* Get DMA memory from the OS.                                              */
2106 /*                                                                          */
2107 /* Validates that the OS has provided DMA buffers in response to a          */
2108 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2109 /* When the callback is used the OS will return 0 for the mapping function  */
2110 /* (bus_dmamap_load()) so the callback stores the segment address through   */
2111 /* the supplied bus_addr_t pointer (0 on error) to report failures.         */
2112 /*                                                                          */
2113 /* Returns:                                                                 */
2114 /*   Nothing.                                                               */
2115 /****************************************************************************/
2116 static void
2117 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2118 {
2119 	bus_addr_t *busaddr = arg;
2120 
2121 	/* Simulate a mapping failure. */
2122 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2123 		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2124 			__FILE__, __LINE__);
2125 		error = ENOMEM);
2126 
2127 	/* Check for an error and signal the caller that an error occurred. */
2128 	if (error) {
2129 		printf("bce %s(%d): DMA mapping error! error = %d, "
2130 		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2131 		*busaddr = 0;
2132 		return;
2133 	}
2134 
2135 	*busaddr = segs->ds_addr;
2136 	return;
2137 }
2138 
2139 
2140 /****************************************************************************/
2141 /* Allocate any DMA memory needed by the driver.                            */
2142 /*                                                                          */
2143 /* Allocates DMA memory needed for the various global structures needed by  */
2144 /* hardware.                                                                */
2145 /*                                                                          */
2146 /* Returns:                                                                 */
2147 /*   0 for success, positive value for failure.                             */
2148 /****************************************************************************/
2149 static int
2150 bce_dma_alloc(device_t dev)
2151 {
2152 	struct bce_softc *sc;
2153 	int i, error, rc = 0;
2154 	bus_addr_t busaddr;
2155 
2156 	sc = device_get_softc(dev);
2157 
2158 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2159 
2160 	/*
2161 	 * Allocate the parent bus DMA tag appropriate for PCI.
2162 	 */
2163 	if (bus_dma_tag_create(NULL,		/* parent     */
2164 			1,			/* alignment  */
2165 			BCE_DMA_BOUNDARY,	/* boundary   */
2166 			sc->max_bus_addr,	/* lowaddr    */
2167 			BUS_SPACE_MAXADDR,	/* highaddr   */
2168 			NULL, 			/* filterfunc */
2169 			NULL,			/* filterarg  */
2170 			MAXBSIZE, 		/* maxsize    */
2171 			BUS_SPACE_UNRESTRICTED,	/* nsegments  */
2172 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
2173 			0,			/* flags      */
2174 			NULL, 			/* locfunc    */
2175 			NULL,			/* lockarg    */
2176 			&sc->parent_tag)) {
2177 		BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2178 			__FILE__, __LINE__);
2179 		rc = ENOMEM;
2180 		goto bce_dma_alloc_exit;
2181 	}
2182 
2183 	/*
2184 	 * Create a DMA tag for the status block, allocate and clear the
2185 	 * memory, map the memory into DMA space, and fetch the physical
2186 	 * address of the block.
2187 	 */
2188 	if (bus_dma_tag_create(
2189 		sc->parent_tag,			/* parent      */
2190 	    	BCE_DMA_ALIGN,			/* alignment   */
2191 	    	BCE_DMA_BOUNDARY,		/* boundary    */
2192 	    	sc->max_bus_addr,		/* lowaddr     */
2193 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2194 	    	NULL, 				/* filterfunc  */
2195 	    	NULL, 				/* filterarg   */
2196 	    	BCE_STATUS_BLK_SZ, 		/* maxsize     */
2197 	    	1,				/* nsegments   */
2198 	    	BCE_STATUS_BLK_SZ, 		/* maxsegsize  */
2199 	    	0,				/* flags       */
2200 	    	NULL, 				/* lockfunc    */
2201 	    	NULL,				/* lockarg     */
2202 	    	&sc->status_tag)) {
2203 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2204 			__FILE__, __LINE__);
2205 		rc = ENOMEM;
2206 		goto bce_dma_alloc_exit;
2207 	}
2208 
2209 	if(bus_dmamem_alloc(
2210 		sc->status_tag,			/* dmat        */
2211 	    	(void **)&sc->status_block,	/* vaddr       */
2212 	    	BUS_DMA_NOWAIT,			/* flags       */
2213 	    	&sc->status_map)) {
2214 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2215 			__FILE__, __LINE__);
2216 		rc = ENOMEM;
2217 		goto bce_dma_alloc_exit;
2218 	}
2219 
2220 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2221 
2222 	error = bus_dmamap_load(
2223 		sc->status_tag,	   		/* dmat        */
2224 	    	sc->status_map,	   		/* map         */
2225 	    	sc->status_block,	 	/* buf         */
2226 	    	BCE_STATUS_BLK_SZ,	 	/* buflen      */
2227 	    	bce_dma_map_addr, 	 	/* callback    */
2228 	    	&busaddr,		 	/* callbackarg */
2229 	    	BUS_DMA_NOWAIT);		/* flags       */
2230 
2231 	if (error) {
2232 		BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2233 			__FILE__, __LINE__);
2234 		rc = ENOMEM;
2235 		goto bce_dma_alloc_exit;
2236 	}
2237 
2238 	sc->status_block_paddr = busaddr;
2239 	/* DRC - Fix for 64 bit addresses. */
2240 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2241 		(u32) sc->status_block_paddr);
2242 
2243 	/*
2244 	 * Create a DMA tag for the statistics block, allocate and clear the
2245 	 * memory, map the memory into DMA space, and fetch the physical
2246 	 * address of the block.
2247 	 */
2248 	if (bus_dma_tag_create(
2249 		sc->parent_tag,			/* parent      */
2250 	    	BCE_DMA_ALIGN,	 		/* alignment   */
2251 	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2252 	    	sc->max_bus_addr,		/* lowaddr     */
2253 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2254 	    	NULL,		   		/* filterfunc  */
2255 	    	NULL, 		  		/* filterarg   */
2256 	    	BCE_STATS_BLK_SZ, 		/* maxsize     */
2257 	    	1,		  		/* nsegments   */
2258 	    	BCE_STATS_BLK_SZ, 		/* maxsegsize  */
2259 	    	0, 		  		/* flags       */
2260 	    	NULL, 		 		/* lockfunc    */
2261 	    	NULL, 		  		/* lockarg     */
2262 	    	&sc->stats_tag)) {
2263 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2264 			__FILE__, __LINE__);
2265 		rc = ENOMEM;
2266 		goto bce_dma_alloc_exit;
2267 	}
2268 
2269 	if (bus_dmamem_alloc(
2270 		sc->stats_tag,			/* dmat        */
2271 	    	(void **)&sc->stats_block,	/* vaddr       */
2272 	    	BUS_DMA_NOWAIT,			/* flags       */
2273 	    	&sc->stats_map)) {
2274 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2275 			__FILE__, __LINE__);
2276 		rc = ENOMEM;
2277 		goto bce_dma_alloc_exit;
2278 	}
2279 
2280 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2281 
2282 	error = bus_dmamap_load(
2283 		sc->stats_tag,	 	/* dmat        */
2284 	    	sc->stats_map,	 	/* map         */
2285 	    	sc->stats_block, 	/* buf         */
2286 	    	BCE_STATS_BLK_SZ,	/* buflen      */
2287 	    	bce_dma_map_addr,	/* callback    */
2288 	    	&busaddr, 	 	/* callbackarg */
2289 	    	BUS_DMA_NOWAIT);	/* flags       */
2290 
2291 	if(error) {
2292 		BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2293 			__FILE__, __LINE__);
2294 		rc = ENOMEM;
2295 		goto bce_dma_alloc_exit;
2296 	}
2297 
2298 	sc->stats_block_paddr = busaddr;
2299 	/* DRC - Fix for 64 bit address. */
2300 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2301 		(u32) sc->stats_block_paddr);
2302 
2303 	/*
2304 	 * Create a DMA tag for the TX buffer descriptor chain,
2305 	 * allocate and clear the  memory, and fetch the
2306 	 * physical address of the block.
2307 	 */
2308 	if(bus_dma_tag_create(
2309 			sc->parent_tag,		/* parent      */
2310 			BCM_PAGE_SIZE,		/* alignment   */
2311 		    	BCE_DMA_BOUNDARY,	/* boundary    */
2312 			sc->max_bus_addr,	/* lowaddr     */
2313 			BUS_SPACE_MAXADDR, 	/* highaddr    */
2314 			NULL,			/* filterfunc  */
2315 			NULL,			/* filterarg   */
2316 			BCE_TX_CHAIN_PAGE_SZ,	/* maxsize     */
2317 			1,			/* nsegments   */
2318 			BCE_TX_CHAIN_PAGE_SZ,	/* maxsegsize  */
2319 			0,			/* flags       */
2320 			NULL,			/* lockfunc    */
2321 			NULL,			/* lockarg     */
2322 			&sc->tx_bd_chain_tag)) {
2323 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2324 			__FILE__, __LINE__);
2325 		rc = ENOMEM;
2326 		goto bce_dma_alloc_exit;
2327 	}
2328 
2329 	for (i = 0; i < TX_PAGES; i++) {
2330 
2331 		if(bus_dmamem_alloc(
2332 			sc->tx_bd_chain_tag,		/* tag   */
2333 	    		(void **)&sc->tx_bd_chain[i],	/* vaddr */
2334 	    		BUS_DMA_NOWAIT,			/* flags */
2335 		    	&sc->tx_bd_chain_map[i])) {
2336 			BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2337 				"chain DMA memory!\n", __FILE__, __LINE__);
2338 			rc = ENOMEM;
2339 			goto bce_dma_alloc_exit;
2340 		}
2341 
2342 		error = bus_dmamap_load(
2343 			sc->tx_bd_chain_tag,		/* dmat        */
2344 	    		sc->tx_bd_chain_map[i],		/* map         */
2345 	    		sc->tx_bd_chain[i],		/* buf         */
2346 		    	BCE_TX_CHAIN_PAGE_SZ,		/* buflen      */
2347 		    	bce_dma_map_addr,		/* callback    */
2348 	    		&busaddr,			/* callbackarg */
2349 	    		BUS_DMA_NOWAIT);		/* flags       */
2350 
2351 		if (error) {
2352 			BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2353 				__FILE__, __LINE__);
2354 			rc = ENOMEM;
2355 			goto bce_dma_alloc_exit;
2356 		}
2357 
2358 		sc->tx_bd_chain_paddr[i] = busaddr;
2359 		/* DRC - Fix for 64 bit systems. */
2360 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2361 			i, (u32) sc->tx_bd_chain_paddr[i]);
2362 	}
2363 
2364 	/* Create a DMA tag for TX mbufs. */
2365 	if (bus_dma_tag_create(
2366 			sc->parent_tag,	 	 	/* parent      */
2367 			1,		 		/* alignment   */
2368 			BCE_DMA_BOUNDARY, 		/* boundary    */
2369 			sc->max_bus_addr,		/* lowaddr     */
2370 			BUS_SPACE_MAXADDR,		/* highaddr    */
2371 			NULL,				/* filterfunc  */
2372 			NULL,				/* filterarg   */
2373 			MCLBYTES * BCE_MAX_SEGMENTS,	/* maxsize     */
2374 			BCE_MAX_SEGMENTS,  		/* nsegments   */
2375 			MCLBYTES,			/* maxsegsize  */
2376 			0,				/* flags       */
2377 			NULL,				/* lockfunc    */
2378 			NULL,				/* lockarg     */
2379 			&sc->tx_mbuf_tag)) {
2380 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2381 			__FILE__, __LINE__);
2382 		rc = ENOMEM;
2383 		goto bce_dma_alloc_exit;
2384 	}
2385 
2386 	/* Create DMA maps for the TX mbufs clusters. */
2387 	for (i = 0; i < TOTAL_TX_BD; i++) {
2388 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2389 			&sc->tx_mbuf_map[i])) {
2390 			BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2391 				__FILE__, __LINE__);
2392 			rc = ENOMEM;
2393 			goto bce_dma_alloc_exit;
2394 		}
2395 	}
2396 
2397 	/*
2398 	 * Create a DMA tag for the RX buffer descriptor chain,
2399 	 * allocate and clear the  memory, and fetch the physical
2400 	 * address of the blocks.
2401 	 */
2402 	if (bus_dma_tag_create(
2403 			sc->parent_tag,			/* parent      */
2404 			BCM_PAGE_SIZE,			/* alignment   */
2405 			BCE_DMA_BOUNDARY,		/* boundary    */
2406 			BUS_SPACE_MAXADDR,		/* lowaddr     */
2407 			sc->max_bus_addr,		/* lowaddr     */
2408 			NULL,				/* filter      */
2409 			NULL, 				/* filterarg   */
2410 			BCE_RX_CHAIN_PAGE_SZ,		/* maxsize     */
2411 			1, 				/* nsegments   */
2412 			BCE_RX_CHAIN_PAGE_SZ,		/* maxsegsize  */
2413 			0,		 		/* flags       */
2414 			NULL,				/* lockfunc    */
2415 			NULL,				/* lockarg     */
2416 			&sc->rx_bd_chain_tag)) {
2417 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2418 			__FILE__, __LINE__);
2419 		rc = ENOMEM;
2420 		goto bce_dma_alloc_exit;
2421 	}
2422 
2423 	for (i = 0; i < RX_PAGES; i++) {
2424 
2425 		if (bus_dmamem_alloc(
2426 			sc->rx_bd_chain_tag,		/* tag   */
2427 	    		(void **)&sc->rx_bd_chain[i], 	/* vaddr */
2428 	    		BUS_DMA_NOWAIT,		  	/* flags */
2429 		    	&sc->rx_bd_chain_map[i])) {
2430 			BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2431 				"DMA memory!\n", __FILE__, __LINE__);
2432 			rc = ENOMEM;
2433 			goto bce_dma_alloc_exit;
2434 		}
2435 
2436 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2437 
2438 		error = bus_dmamap_load(
2439 			sc->rx_bd_chain_tag,	/* dmat        */
2440 	    		sc->rx_bd_chain_map[i],	/* map         */
2441 	    		sc->rx_bd_chain[i],	/* buf         */
2442 		    	BCE_RX_CHAIN_PAGE_SZ,  	/* buflen      */
2443 		    	bce_dma_map_addr,   	/* callback    */
2444 	    		&busaddr,	   	/* callbackarg */
2445 	    		BUS_DMA_NOWAIT);	/* flags       */
2446 
2447 		if (error) {
2448 			BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2449 				__FILE__, __LINE__);
2450 			rc = ENOMEM;
2451 			goto bce_dma_alloc_exit;
2452 		}
2453 
2454 		sc->rx_bd_chain_paddr[i] = busaddr;
2455 		/* DRC - Fix for 64 bit systems. */
2456 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2457 			i, (u32) sc->rx_bd_chain_paddr[i]);
2458 	}
2459 
2460 	/*
2461 	 * Create a DMA tag for RX mbufs.
2462 	 */
2463 	if (bus_dma_tag_create(
2464 			sc->parent_tag,		/* parent      */
2465 			1,			/* alignment   */
2466 			BCE_DMA_BOUNDARY,  	/* boundary    */
2467 			sc->max_bus_addr,  	/* lowaddr     */
2468 			BUS_SPACE_MAXADDR,	/* highaddr    */
2469 			NULL, 			/* filterfunc  */
2470 			NULL, 			/* filterarg   */
2471 			MJUM9BYTES,		/* maxsize     */
2472 			BCE_MAX_SEGMENTS, 	/* nsegments   */
2473 			MJUM9BYTES,		/* maxsegsize  */
2474 			0,			/* flags       */
2475 			NULL, 			/* lockfunc    */
2476 			NULL,			/* lockarg     */
2477 	    	&sc->rx_mbuf_tag)) {
2478 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2479 			__FILE__, __LINE__);
2480 		rc = ENOMEM;
2481 		goto bce_dma_alloc_exit;
2482 	}
2483 
2484 	/* Create DMA maps for the RX mbuf clusters. */
2485 	for (i = 0; i < TOTAL_RX_BD; i++) {
2486 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2487 				&sc->rx_mbuf_map[i])) {
2488 			BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2489 				__FILE__, __LINE__);
2490 			rc = ENOMEM;
2491 			goto bce_dma_alloc_exit;
2492 		}
2493 	}
2494 
2495 bce_dma_alloc_exit:
2496 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2497 
2498 	return(rc);
2499 }
2500 
2501 
2502 /****************************************************************************/
2503 /* Release all resources used by the driver.                                */
2504 /*                                                                          */
2505 /* Releases all resources acquired by the driver including interrupts,      */
2506 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2507 /*                                                                          */
2508 /* Returns:                                                                 */
2509 /*   Nothing.                                                               */
2510 /****************************************************************************/
2511 static void
2512 bce_release_resources(struct bce_softc *sc)
2513 {
2514 	device_t dev;
2515 
2516 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2517 
2518 	dev = sc->bce_dev;
2519 
2520 	bce_dma_free(sc);
2521 
2522 	if (sc->bce_intrhand != NULL)
2523 		bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2524 
2525 	if (sc->bce_irq != NULL)
2526 		bus_release_resource(dev,
2527 			SYS_RES_IRQ,
2528 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2529 			sc->bce_irq);
2530 
2531 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
2532 		pci_release_msi(dev);
2533 
2534 	if (sc->bce_res != NULL)
2535 		bus_release_resource(dev,
2536 			SYS_RES_MEMORY,
2537 		    PCIR_BAR(0),
2538 		    sc->bce_res);
2539 
2540 	if (sc->bce_ifp != NULL)
2541 		if_free(sc->bce_ifp);
2542 
2543 
2544 	if (mtx_initialized(&sc->bce_mtx))
2545 		BCE_LOCK_DESTROY(sc);
2546 
2547 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2548 
2549 }
2550 
2551 
2552 /****************************************************************************/
2553 /* Firmware synchronization.                                                */
2554 /*                                                                          */
2555 /* Before performing certain events such as a chip reset, synchronize with  */
2556 /* the firmware first.                                                      */
2557 /*                                                                          */
2558 /* Returns:                                                                 */
2559 /*   0 for success, positive value for failure.                             */
2560 /****************************************************************************/
2561 static int
2562 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2563 {
2564 	int i, rc = 0;
2565 	u32 val;
2566 
2567 	/* Don't waste any time if we've timed out before. */
2568 	if (sc->bce_fw_timed_out) {
2569 		rc = EBUSY;
2570 		goto bce_fw_sync_exit;
2571 	}
2572 
2573 	/* Increment the message sequence number. */
2574 	sc->bce_fw_wr_seq++;
2575 	msg_data |= sc->bce_fw_wr_seq;
2576 
2577  	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2578 
2579 	/* Send the message to the bootcode driver mailbox. */
2580 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2581 
2582 	/* Wait for the bootcode to acknowledge the message. */
2583 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2584 		/* Check for a response in the bootcode firmware mailbox. */
2585 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2586 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2587 			break;
2588 		DELAY(1000);
2589 	}
2590 
2591 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2592 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2593 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2594 
2595 		BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2596 			"msg_data = 0x%08X\n",
2597 			__FILE__, __LINE__, msg_data);
2598 
2599 		msg_data &= ~BCE_DRV_MSG_CODE;
2600 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2601 
2602 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2603 
2604 		sc->bce_fw_timed_out = 1;
2605 		rc = EBUSY;
2606 	}
2607 
2608 bce_fw_sync_exit:
2609 	return (rc);
2610 }
2611 
2612 
2613 /****************************************************************************/
2614 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2615 /*                                                                          */
2616 /* Returns:                                                                 */
2617 /*   Nothing.                                                               */
2618 /****************************************************************************/
2619 static void
2620 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2621 	u32 rv2p_code_len, u32 rv2p_proc)
2622 {
2623 	int i;
2624 	u32 val;
2625 
2626 	for (i = 0; i < rv2p_code_len; i += 8) {
2627 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2628 		rv2p_code++;
2629 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2630 		rv2p_code++;
2631 
2632 		if (rv2p_proc == RV2P_PROC1) {
2633 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2634 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2635 		}
2636 		else {
2637 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2638 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2639 		}
2640 	}
2641 
2642 	/* Reset the processor, un-stall is done later. */
2643 	if (rv2p_proc == RV2P_PROC1) {
2644 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2645 	}
2646 	else {
2647 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2648 	}
2649 }
2650 
2651 
2652 /****************************************************************************/
2653 /* Load RISC processor firmware.                                            */
2654 /*                                                                          */
2655 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2656 /* associated with a particular processor.                                  */
2657 /*                                                                          */
2658 /* Returns:                                                                 */
2659 /*   Nothing.                                                               */
2660 /****************************************************************************/
2661 static void
2662 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2663 	struct fw_info *fw)
2664 {
2665 	u32 offset;
2666 	u32 val;
2667 
2668 	/* Halt the CPU. */
2669 	val = REG_RD_IND(sc, cpu_reg->mode);
2670 	val |= cpu_reg->mode_value_halt;
2671 	REG_WR_IND(sc, cpu_reg->mode, val);
2672 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2673 
2674 	/* Load the Text area. */
2675 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2676 	if (fw->text) {
2677 		int j;
2678 
2679 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2680 			REG_WR_IND(sc, offset, fw->text[j]);
2681 	        }
2682 	}
2683 
2684 	/* Load the Data area. */
2685 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2686 	if (fw->data) {
2687 		int j;
2688 
2689 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2690 			REG_WR_IND(sc, offset, fw->data[j]);
2691 		}
2692 	}
2693 
2694 	/* Load the SBSS area. */
2695 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2696 	if (fw->sbss) {
2697 		int j;
2698 
2699 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2700 			REG_WR_IND(sc, offset, fw->sbss[j]);
2701 		}
2702 	}
2703 
2704 	/* Load the BSS area. */
2705 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2706 	if (fw->bss) {
2707 		int j;
2708 
2709 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2710 			REG_WR_IND(sc, offset, fw->bss[j]);
2711 		}
2712 	}
2713 
2714 	/* Load the Read-Only area. */
2715 	offset = cpu_reg->spad_base +
2716 		(fw->rodata_addr - cpu_reg->mips_view_base);
2717 	if (fw->rodata) {
2718 		int j;
2719 
2720 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2721 			REG_WR_IND(sc, offset, fw->rodata[j]);
2722 		}
2723 	}
2724 
2725 	/* Clear the pre-fetch instruction. */
2726 	REG_WR_IND(sc, cpu_reg->inst, 0);
2727 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2728 
2729 	/* Start the CPU. */
2730 	val = REG_RD_IND(sc, cpu_reg->mode);
2731 	val &= ~cpu_reg->mode_value_halt;
2732 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2733 	REG_WR_IND(sc, cpu_reg->mode, val);
2734 }
2735 
2736 
2737 /****************************************************************************/
2738 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2739 /*                                                                          */
2740 /* Loads the firmware for each CPU and starts the CPU.                      */
2741 /*                                                                          */
2742 /* Returns:                                                                 */
2743 /*   Nothing.                                                               */
2744 /****************************************************************************/
2745 static void
2746 bce_init_cpus(struct bce_softc *sc)
2747 {
2748 	struct cpu_reg cpu_reg;
2749 	struct fw_info fw;
2750 
2751 	/* Initialize the RV2P processor. */
2752 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2753 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2754 
2755 	/* Initialize the RX Processor. */
2756 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2757 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2758 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2759 	cpu_reg.state = BCE_RXP_CPU_STATE;
2760 	cpu_reg.state_value_clear = 0xffffff;
2761 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2762 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2763 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2764 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2765 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2766 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2767 	cpu_reg.mips_view_base = 0x8000000;
2768 
2769 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2770 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2771 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2772 	fw.start_addr = bce_RXP_b06FwStartAddr;
2773 
2774 	fw.text_addr = bce_RXP_b06FwTextAddr;
2775 	fw.text_len = bce_RXP_b06FwTextLen;
2776 	fw.text_index = 0;
2777 	fw.text = bce_RXP_b06FwText;
2778 
2779 	fw.data_addr = bce_RXP_b06FwDataAddr;
2780 	fw.data_len = bce_RXP_b06FwDataLen;
2781 	fw.data_index = 0;
2782 	fw.data = bce_RXP_b06FwData;
2783 
2784 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2785 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2786 	fw.sbss_index = 0;
2787 	fw.sbss = bce_RXP_b06FwSbss;
2788 
2789 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2790 	fw.bss_len = bce_RXP_b06FwBssLen;
2791 	fw.bss_index = 0;
2792 	fw.bss = bce_RXP_b06FwBss;
2793 
2794 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2795 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2796 	fw.rodata_index = 0;
2797 	fw.rodata = bce_RXP_b06FwRodata;
2798 
2799 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2800 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2801 
2802 	/* Initialize the TX Processor. */
2803 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2804 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2805 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2806 	cpu_reg.state = BCE_TXP_CPU_STATE;
2807 	cpu_reg.state_value_clear = 0xffffff;
2808 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2809 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2810 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2811 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2812 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2813 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2814 	cpu_reg.mips_view_base = 0x8000000;
2815 
2816 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2817 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2818 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2819 	fw.start_addr = bce_TXP_b06FwStartAddr;
2820 
2821 	fw.text_addr = bce_TXP_b06FwTextAddr;
2822 	fw.text_len = bce_TXP_b06FwTextLen;
2823 	fw.text_index = 0;
2824 	fw.text = bce_TXP_b06FwText;
2825 
2826 	fw.data_addr = bce_TXP_b06FwDataAddr;
2827 	fw.data_len = bce_TXP_b06FwDataLen;
2828 	fw.data_index = 0;
2829 	fw.data = bce_TXP_b06FwData;
2830 
2831 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2832 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2833 	fw.sbss_index = 0;
2834 	fw.sbss = bce_TXP_b06FwSbss;
2835 
2836 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2837 	fw.bss_len = bce_TXP_b06FwBssLen;
2838 	fw.bss_index = 0;
2839 	fw.bss = bce_TXP_b06FwBss;
2840 
2841 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2842 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2843 	fw.rodata_index = 0;
2844 	fw.rodata = bce_TXP_b06FwRodata;
2845 
2846 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2847 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2848 
2849 	/* Initialize the TX Patch-up Processor. */
2850 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2851 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2852 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2853 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2854 	cpu_reg.state_value_clear = 0xffffff;
2855 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2856 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2857 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2858 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2859 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2860 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2861 	cpu_reg.mips_view_base = 0x8000000;
2862 
2863 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2864 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2865 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2866 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2867 
2868 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2869 	fw.text_len = bce_TPAT_b06FwTextLen;
2870 	fw.text_index = 0;
2871 	fw.text = bce_TPAT_b06FwText;
2872 
2873 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2874 	fw.data_len = bce_TPAT_b06FwDataLen;
2875 	fw.data_index = 0;
2876 	fw.data = bce_TPAT_b06FwData;
2877 
2878 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2879 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2880 	fw.sbss_index = 0;
2881 	fw.sbss = bce_TPAT_b06FwSbss;
2882 
2883 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2884 	fw.bss_len = bce_TPAT_b06FwBssLen;
2885 	fw.bss_index = 0;
2886 	fw.bss = bce_TPAT_b06FwBss;
2887 
2888 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2889 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2890 	fw.rodata_index = 0;
2891 	fw.rodata = bce_TPAT_b06FwRodata;
2892 
2893 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2894 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2895 
2896 	/* Initialize the Completion Processor. */
2897 	cpu_reg.mode = BCE_COM_CPU_MODE;
2898 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2899 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2900 	cpu_reg.state = BCE_COM_CPU_STATE;
2901 	cpu_reg.state_value_clear = 0xffffff;
2902 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2903 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2904 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2905 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2906 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2907 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2908 	cpu_reg.mips_view_base = 0x8000000;
2909 
2910 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2911 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2912 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2913 	fw.start_addr = bce_COM_b06FwStartAddr;
2914 
2915 	fw.text_addr = bce_COM_b06FwTextAddr;
2916 	fw.text_len = bce_COM_b06FwTextLen;
2917 	fw.text_index = 0;
2918 	fw.text = bce_COM_b06FwText;
2919 
2920 	fw.data_addr = bce_COM_b06FwDataAddr;
2921 	fw.data_len = bce_COM_b06FwDataLen;
2922 	fw.data_index = 0;
2923 	fw.data = bce_COM_b06FwData;
2924 
2925 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2926 	fw.sbss_len = bce_COM_b06FwSbssLen;
2927 	fw.sbss_index = 0;
2928 	fw.sbss = bce_COM_b06FwSbss;
2929 
2930 	fw.bss_addr = bce_COM_b06FwBssAddr;
2931 	fw.bss_len = bce_COM_b06FwBssLen;
2932 	fw.bss_index = 0;
2933 	fw.bss = bce_COM_b06FwBss;
2934 
2935 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2936 	fw.rodata_len = bce_COM_b06FwRodataLen;
2937 	fw.rodata_index = 0;
2938 	fw.rodata = bce_COM_b06FwRodata;
2939 
2940 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2941 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2942 }
2943 
2944 
2945 /****************************************************************************/
2946 /* Initialize context memory.                                               */
2947 /*                                                                          */
2948 /* Clears the memory associated with each Context ID (CID).                 */
2949 /*                                                                          */
2950 /* Returns:                                                                 */
2951 /*   Nothing.                                                               */
2952 /****************************************************************************/
2953 static void
2954 bce_init_context(struct bce_softc *sc)
2955 {
2956 	u32 vcid;
2957 
2958 	vcid = 96;
2959 	while (vcid) {
2960 		u32 vcid_addr, pcid_addr, offset;
2961 
2962 		vcid--;
2963 
2964    		vcid_addr = GET_CID_ADDR(vcid);
2965 		pcid_addr = vcid_addr;
2966 
2967 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
2968 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2969 
2970 		/* Zero out the context. */
2971 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
2972 			CTX_WR(sc, 0x00, offset, 0);
2973 		}
2974 
2975 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2976 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2977 	}
2978 }
2979 
2980 
2981 /****************************************************************************/
2982 /* Fetch the permanent MAC address of the controller.                       */
2983 /*                                                                          */
2984 /* Returns:                                                                 */
2985 /*   Nothing.                                                               */
2986 /****************************************************************************/
2987 static void
2988 bce_get_mac_addr(struct bce_softc *sc)
2989 {
2990 	u32 mac_lo = 0, mac_hi = 0;
2991 
2992 	/*
2993 	 * The NetXtreme II bootcode populates various NIC
2994 	 * power-on and runtime configuration items in a
2995 	 * shared memory area.  The factory configured MAC
2996 	 * address is available from both NVRAM and the
2997 	 * shared memory area so we'll read the value from
2998 	 * shared memory for speed.
2999 	 */
3000 
3001 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3002 		BCE_PORT_HW_CFG_MAC_UPPER);
3003 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3004 		BCE_PORT_HW_CFG_MAC_LOWER);
3005 
3006 	if ((mac_lo == 0) && (mac_hi == 0)) {
3007 		BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3008 			__FILE__, __LINE__);
3009 	} else {
3010 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3011 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3012 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3013 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3014 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3015 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3016 	}
3017 
3018 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3019 }
3020 
3021 
3022 /****************************************************************************/
3023 /* Program the MAC address.                                                 */
3024 /*                                                                          */
3025 /* Returns:                                                                 */
3026 /*   Nothing.                                                               */
3027 /****************************************************************************/
3028 static void
3029 bce_set_mac_addr(struct bce_softc *sc)
3030 {
3031 	u32 val;
3032 	u8 *mac_addr = sc->eaddr;
3033 
3034 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3035 
3036 	val = (mac_addr[0] << 8) | mac_addr[1];
3037 
3038 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3039 
3040 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3041 		(mac_addr[4] << 8) | mac_addr[5];
3042 
3043 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3044 }
3045 
3046 
3047 /****************************************************************************/
3048 /* Stop the controller.                                                     */
3049 /*                                                                          */
3050 /* Returns:                                                                 */
3051 /*   Nothing.                                                               */
3052 /****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	mii = device_get_softc(sc->bce_miibus);

	/* Stop the periodic statistics/tick callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */

	/* Temporarily force IFF_UP so mii_mediachg() acts on the PHY. */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	/*
	 * If we are called from bce_detach(), mii is already NULL.
	 */
	if (mii != NULL) {
		/* Swap in IFM_NONE, isolate the PHY, then restore media. */
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
	}

	/* Restore the saved interface flags. */
	ifp->if_flags = itmp;
	sc->watchdog_timer = 0;

	sc->bce_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	bce_mgmt_init_locked(sc);
}
3117 
3118 
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Reset typically completes in ~30us; poll for up to ~100us. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BCE_PRINTF(sc, "%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bce_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bce_reset_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3199 
3200 
3201 static int
3202 bce_chipinit(struct bce_softc *sc)
3203 {
3204 	u32 val;
3205 	int rc = 0;
3206 
3207 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3208 
3209 	/* Make sure the interrupt is not active. */
3210 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3211 
3212 	/* Initialize DMA byte/word swapping, configure the number of DMA  */
3213 	/* channels and PCI clock compensation delay.                      */
3214 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3215 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3216 #if BYTE_ORDER == BIG_ENDIAN
3217 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3218 #endif
3219 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3220 	      DMA_READ_CHANS << 12 |
3221 	      DMA_WRITE_CHANS << 16;
3222 
3223 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3224 
3225 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3226 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3227 
3228 	/*
3229 	 * This setting resolves a problem observed on certain Intel PCI
3230 	 * chipsets that cannot handle multiple outstanding DMA operations.
3231 	 * See errata E9_5706A1_65.
3232 	 */
3233 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3234 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3235 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3236 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3237 
3238 	REG_WR(sc, BCE_DMA_CONFIG, val);
3239 
3240 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3241 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3242 		u16 val;
3243 
3244 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3245 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3246 	}
3247 
3248 	/* Enable the RX_V2P and Context state machines before access. */
3249 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3250 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3251 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3252 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3253 
3254 	/* Initialize context mapping and zero out the quick contexts. */
3255 	bce_init_context(sc);
3256 
3257 	/* Initialize the on-boards CPUs */
3258 	bce_init_cpus(sc);
3259 
3260 	/* Prepare NVRAM for access. */
3261 	if (bce_init_nvram(sc)) {
3262 		rc = ENODEV;
3263 		goto bce_chipinit_exit;
3264 	}
3265 
3266 	/* Set the kernel bypass block size */
3267 	val = REG_RD(sc, BCE_MQ_CONFIG);
3268 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3269 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3270 	REG_WR(sc, BCE_MQ_CONFIG, val);
3271 
3272 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3273 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3274 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3275 
3276 	val = (BCM_PAGE_BITS - 8) << 24;
3277 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3278 
3279 	/* Configure page size. */
3280 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3281 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3282 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3283 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3284 
3285 bce_chipinit_exit:
3286 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3287 
3288 	return(rc);
3289 }
3290 
3291 
3292 /****************************************************************************/
3293 /* Initialize the controller in preparation to send/receive traffic.        */
3294 /*                                                                          */
3295 /* Returns:                                                                 */
3296 /*   0 for success, positive value for failure.                             */
3297 /****************************************************************************/
3298 static int
3299 bce_blockinit(struct bce_softc *sc)
3300 {
3301 	u32 reg, val;
3302 	int rc = 0;
3303 
3304 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3305 
3306 	/* Load the hardware default MAC address. */
3307 	bce_set_mac_addr(sc);
3308 
3309 	/* Set the Ethernet backoff seed value */
3310 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3311 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3312 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3313 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3314 
3315 	sc->last_status_idx = 0;
3316 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3317 
3318 	/* Set up link change interrupt generation. */
3319 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3320 
3321 	/* Program the physical address of the status block. */
3322 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3323 		BCE_ADDR_LO(sc->status_block_paddr));
3324 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3325 		BCE_ADDR_HI(sc->status_block_paddr));
3326 
3327 	/* Program the physical address of the statistics block. */
3328 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3329 		BCE_ADDR_LO(sc->stats_block_paddr));
3330 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3331 		BCE_ADDR_HI(sc->stats_block_paddr));
3332 
3333 	/* Program various host coalescing parameters. */
3334 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3335 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3336 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3337 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3338 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3339 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3340 	REG_WR(sc, BCE_HC_TX_TICKS,
3341 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3342 	REG_WR(sc, BCE_HC_RX_TICKS,
3343 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3344 	REG_WR(sc, BCE_HC_COM_TICKS,
3345 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3346 	REG_WR(sc, BCE_HC_CMD_TICKS,
3347 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3348 	REG_WR(sc, BCE_HC_STATS_TICKS,
3349 		(sc->bce_stats_ticks & 0xffff00));
3350 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3351 		0xbb8);  /* 3ms */
3352 	REG_WR(sc, BCE_HC_CONFIG,
3353 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3354 		BCE_HC_CONFIG_COLLECT_STATS));
3355 
3356 	/* Clear the internal statistics counters. */
3357 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3358 
3359 	/* Verify that bootcode is running. */
3360 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3361 
3362 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3363 		BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3364 			__FILE__, __LINE__);
3365 		reg = 0);
3366 
3367 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3368 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3369 		BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3370 			"Expected: 08%08X\n", __FILE__, __LINE__,
3371 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3372 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3373 		rc = ENODEV;
3374 		goto bce_blockinit_exit;
3375 	}
3376 
3377 	/* Check if any management firmware is running. */
3378 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3379 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3380 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3381 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3382 	}
3383 
3384 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3385 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3386 
3387 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3388 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3389 
3390 	/* Enable link state change interrupt generation. */
3391 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3392 
3393 	/* Enable all remaining blocks in the MAC. */
3394 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3395 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3396 	DELAY(20);
3397 
3398 bce_blockinit_exit:
3399 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3400 
3401 	return (rc);
3402 }
3403 
3404 
3405 /****************************************************************************/
3406 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3407 /*                                                                          */
3408 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3409 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3410 /* necessary.                                                               */
3411 /*                                                                          */
3412 /* Returns:                                                                 */
3413 /*   0 for success, positive value for failure.                             */
3414 /****************************************************************************/
static int
bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
	u32 *prod_bseq)
{
	bus_dmamap_t		map;
	bus_dma_segment_t	segs[4];
	struct mbuf *m_new = NULL;
	struct rx_bd		*rxbd;
	int i, nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	/* No mbuf supplied by the caller: allocate a fresh header + cluster. */
	if (m == NULL) {

		/* Debug builds may randomly simulate an allocation failure here. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
				__FILE__, __LINE__);
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
				__FILE__, __LINE__);

			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);
		/* Attach a cluster; M_EXT is only set if the attach succeeded. */
		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
		if (!(m_new->m_flags & M_EXT)) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
				__FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
	} else {
		/*
		 * Recycle the caller's mbuf: reset its length and rewind
		 * the data pointer to the start of the cluster.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * NOTE(review): on a load failure a recycled mbuf (m != NULL) is
	 * freed here too; callers must not touch m after a failed return.
	 */
	if (error) {
		BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
			__FILE__, __LINE__);

		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_buf_exit;
	}

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n",
			__FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
	*prod_bseq += segs[0].ds_len;

	/* Fill one additional rx_bd per remaining DMA segment. */
	for (i = 1; i < nsegs; i++) {

		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		rxbd->rx_bd_len       = htole32(segs[i].ds_len);
		rxbd->rx_bd_flags     = 0;
		*prod_bseq += segs[i].ds_len;
	}

	/* Mark the final rx_bd of the packet. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
3550 
3551 
3552 /****************************************************************************/
3553 /* Allocate memory and initialize the TX data structures.                   */
3554 /*                                                                          */
3555 /* Returns:                                                                 */
3556 /*   0 for success, positive value for failure.                             */
3557 /****************************************************************************/
3558 static int
3559 bce_init_tx_chain(struct bce_softc *sc)
3560 {
3561 	struct tx_bd *txbd;
3562 	u32 val;
3563 	int i, rc = 0;
3564 
3565 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3566 
3567 	/* Set the initial TX producer/consumer indices. */
3568 	sc->tx_prod        = 0;
3569 	sc->tx_cons        = 0;
3570 	sc->tx_prod_bseq   = 0;
3571 	sc->used_tx_bd = 0;
3572 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3573 
3574 	/*
3575 	 * The NetXtreme II supports a linked-list structre called
3576 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3577 	 * consists of a series of 1 or more chain pages, each of which
3578 	 * consists of a fixed number of BD entries.
3579 	 * The last BD entry on each page is a pointer to the next page
3580 	 * in the chain, and the last pointer in the BD chain
3581 	 * points back to the beginning of the chain.
3582 	 */
3583 
3584 	/* Set the TX next pointer chain entries. */
3585 	for (i = 0; i < TX_PAGES; i++) {
3586 		int j;
3587 
3588 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3589 
3590 		/* Check if we've reached the last page. */
3591 		if (i == (TX_PAGES - 1))
3592 			j = 0;
3593 		else
3594 			j = i + 1;
3595 
3596 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3597 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3598 	}
3599 
3600 	/*
3601 	 * Initialize the context ID for an L2 TX chain.
3602 	 */
3603 	val = BCE_L2CTX_TYPE_TYPE_L2;
3604 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3605 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3606 
3607 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3608 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3609 
3610 	/* Point the hardware to the first page in the chain. */
3611 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3612 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3613 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3614 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3615 
3616 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3617 
3618 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3619 
3620 	return(rc);
3621 }
3622 
3623 
3624 /****************************************************************************/
3625 /* Free memory and clear the TX data structures.                            */
3626 /*                                                                          */
3627 /* Returns:                                                                 */
3628 /*   Nothing.                                                               */
3629 /****************************************************************************/
3630 static void
3631 bce_free_tx_chain(struct bce_softc *sc)
3632 {
3633 	int i;
3634 
3635 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3636 
3637 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3638 	for (i = 0; i < TOTAL_TX_BD; i++) {
3639 		if (sc->tx_mbuf_ptr[i] != NULL) {
3640 			if (sc->tx_mbuf_map != NULL)
3641 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3642 					BUS_DMASYNC_POSTWRITE);
3643 			m_freem(sc->tx_mbuf_ptr[i]);
3644 			sc->tx_mbuf_ptr[i] = NULL;
3645 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3646 		}
3647 	}
3648 
3649 	/* Clear each TX chain page. */
3650 	for (i = 0; i < TX_PAGES; i++)
3651 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3652 
3653 	/* Check if we lost any mbufs in the process. */
3654 	DBRUNIF((sc->tx_mbuf_alloc),
3655 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3656 			"from tx chain!\n",
3657 			__FILE__, __LINE__, sc->tx_mbuf_alloc));
3658 
3659 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3660 }
3661 
3662 
3663 /****************************************************************************/
3664 /* Allocate memory and initialize the RX data structures.                   */
3665 /*                                                                          */
3666 /* Returns:                                                                 */
3667 /*   0 for success, positive value for failure.                             */
3668 /****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	u16 prod, chain_prod;
	u32 prod_bseq, val;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod        = 0;
	sc->rx_cons        = 0;
	sc->rx_prod_bseq   = 0;
	sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The last rx_bd slot on each page is the chain-page pointer. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): magic 0x02 << 8 -- presumably a chip-specific context field; confirm against the NetXtreme II programming docs. */
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < BCE_RX_SLACK_SPACE) {
		chain_prod = RX_CHAIN_IDX(prod);
		if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
			BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod      = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Flush the rx_bd pages to memory so the hardware sees the new chain. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(
			sc->rx_bd_chain_tag,
	    	sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3749 
3750 
3751 /****************************************************************************/
3752 /* Free memory and clear the RX data structures.                            */
3753 /*                                                                          */
3754 /* Returns:                                                                 */
3755 /*   Nothing.                                                               */
3756 /****************************************************************************/
3757 static void
3758 bce_free_rx_chain(struct bce_softc *sc)
3759 {
3760 	int i;
3761 
3762 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3763 
3764 	/* Free any mbufs still in the RX mbuf chain. */
3765 	for (i = 0; i < TOTAL_RX_BD; i++) {
3766 		if (sc->rx_mbuf_ptr[i] != NULL) {
3767 			if (sc->rx_mbuf_map[i] != NULL)
3768 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3769 					BUS_DMASYNC_POSTREAD);
3770 			m_freem(sc->rx_mbuf_ptr[i]);
3771 			sc->rx_mbuf_ptr[i] = NULL;
3772 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3773 		}
3774 	}
3775 
3776 	/* Clear each RX chain page. */
3777 	for (i = 0; i < RX_PAGES; i++)
3778 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3779 
3780 	/* Check if we lost any mbufs in the process. */
3781 	DBRUNIF((sc->rx_mbuf_alloc),
3782 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3783 			__FILE__, __LINE__, sc->rx_mbuf_alloc));
3784 
3785 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3786 }
3787 
3788 
3789 /****************************************************************************/
3790 /* Set media options.                                                       */
3791 /*                                                                          */
3792 /* Returns:                                                                 */
3793 /*   0 for success, positive value for failure.                             */
3794 /****************************************************************************/
3795 static int
3796 bce_ifmedia_upd(struct ifnet *ifp)
3797 {
3798 	struct bce_softc *sc;
3799 
3800 	sc = ifp->if_softc;
3801 	BCE_LOCK(sc);
3802 	bce_ifmedia_upd_locked(ifp);
3803 	BCE_UNLOCK(sc);
3804 	return (0);
3805 }
3806 
3807 static void
3808 bce_ifmedia_upd_locked(struct ifnet *ifp)
3809 {
3810 	struct bce_softc *sc;
3811 	struct mii_data *mii;
3812 	struct ifmedia *ifm;
3813 
3814 	sc = ifp->if_softc;
3815 	ifm = &sc->bce_ifmedia;
3816 	BCE_LOCK_ASSERT(sc);
3817 
3818 	mii = device_get_softc(sc->bce_miibus);
3819 	sc->bce_link = 0;
3820 	if (mii->mii_instance) {
3821 		struct mii_softc *miisc;
3822 
3823 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3824 			mii_phy_reset(miisc);
3825 	}
3826 	mii_mediachg(mii);
3827 }
3828 
3829 
3830 /****************************************************************************/
3831 /* Reports current media status.                                            */
3832 /*                                                                          */
3833 /* Returns:                                                                 */
3834 /*   Nothing.                                                               */
3835 /****************************************************************************/
3836 static void
3837 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3838 {
3839 	struct bce_softc *sc;
3840 	struct mii_data *mii;
3841 
3842 	sc = ifp->if_softc;
3843 
3844 	BCE_LOCK(sc);
3845 
3846 	mii = device_get_softc(sc->bce_miibus);
3847 
3848 	mii_pollstat(mii);
3849 	ifmr->ifm_active = mii->mii_media_active;
3850 	ifmr->ifm_status = mii->mii_media_status;
3851 
3852 	BCE_UNLOCK(sc);
3853 }
3854 
3855 
3856 /****************************************************************************/
3857 /* Handles PHY generated interrupt events.                                  */
3858 /*                                                                          */
3859 /* Returns:                                                                 */
3860 /*   Nothing.                                                               */
3861 /****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	/*
	 * The attention bits carry the current link state; the ack bits
	 * hold the state the driver last acknowledged.
	 */
	new_link_state = sc->status_block->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {

		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Clear the link flag and re-run the tick immediately. */
		sc->bce_link = 0;
		callout_stop(&sc->bce_stat_ch);
		bce_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
		}
		else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
		}

	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
3898 
3899 
3900 /****************************************************************************/
3901 /* Handles received frame interrupt events.                                 */
3902 /*                                                                          */
3903 /* Returns:                                                                 */
3904 /*   Nothing.                                                               */
3905 /****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	u32 sw_prod_bseq;
	struct l2_fhdr *l2fhdr;

	DBRUNIF(1, sc->rx_interrupts++);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip to the next entry if this index lands on a chain page pointer. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
		__FUNCTION__, sw_prod, sw_cons,
		sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u32 status;

		/* Convert the producer/consumer indices to an actual rx_bd index. */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_rxbd(sc, sw_chain_cons, rxbd));

#ifdef DEVICE_POLLING
		/*
		 * NOTE(review): breaking out here leaves the current BD
		 * already counted in free_rx_bd but not processed; confirm
		 * the accounting re-syncs correctly on the next poll pass.
		 */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->bce_rxcycles <= 0)
				break;
			sc->bce_rxcycles--;
		}
#endif

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {

			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_chain_cons);
				bce_breakpoint(sc));

			/* DRC - ToDo: If the received packet is small, say less */
			/*             than 128 bytes, allocate a new mbuf here, */
			/*             copy the data to that mbuf, and recycle   */
			/*             the mapped jumbo frame.                   */

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
		    	BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len    = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Debug builds may randomly fake a status error here. */
			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
				BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
				status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
				BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
					"Min(%d), Actual(%d), Max(%d)\n",
					__FILE__, __LINE__, (int) BCE_MIN_MTU,
					len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
				bce_dump_mbuf(sc, m);
		 		bce_breakpoint(sc));

			/* Strip the Ethernet CRC from the reported length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
				L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
				L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {

				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

				DBRUN(BCE_WARN,
					BCE_PRINTF(sc, "%s(%d): Failed to allocate "
					"new mbuf, incoming frame dropped!\n",
					__FILE__, __LINE__));

				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/* Skip over the l2_fhdr when passing the data up the stack. */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
				struct ether_header *eh;
				eh = mtod(m, struct ether_header *);
				BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
					__FUNCTION__, eh->ether_dhost, ":",
					eh->ether_shost, ":", htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {

				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
					else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid IP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
						      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
							| CSUM_PSEUDO_HDR);
					} else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}


			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
					__FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
#if __FreeBSD_version < 700000
				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
#else
				m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
				m->m_flags |= M_VLANTAG;
#endif
			}

			/* Pass the mbuf off to the upper layers. */
			ifp->if_ipackets++;
			DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
				__FUNCTION__);
			/* Drop the lock across the stack call; if_input may sleep/recurse. */
			BCE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);
			BCE_LOCK(sc);

bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Hand the rx_bd chain pages back to the hardware. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

	/* Commit the working copies back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Notify the chip of the new producer index and byte sequence. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4179 
4180 
4181 /****************************************************************************/
4182 /* Handles transmit completion interrupt events.                            */
4183 /*                                                                          */
4184 /* Returns:                                                                 */
4185 /*   Nothing.                                                               */
4186 /****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	BCE_LOCK_ASSERT(sc);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
				" 0x%04X > 0x%04X\n",
				__FILE__, __LINE__, sw_tx_chain_cons,
				(int) MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1,
			txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				[TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
				"txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
				BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		/* One more tx_bd slot is now available to bce_start(). */
		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	sc->watchdog_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
			BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n",
				__FUNCTION__, sc->used_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	/* Commit the new consumer index. */
	sc->tx_cons = sw_tx_cons;
}
4298 
4299 
4300 /****************************************************************************/
4301 /* Disables interrupt generation.                                           */
4302 /*                                                                          */
4303 /* Returns:                                                                 */
4304 /*   Nothing.                                                               */
4305 /****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Mask further interrupt generation by the controller. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted write to the chip. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
}
4313 
4314 
4315 /****************************************************************************/
4316 /* Enables interrupt generation.                                            */
4317 /*                                                                          */
4318 /* Returns:                                                                 */
4319 /*   Nothing.                                                               */
4320 /****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	u32 val;

	/*
	 * Ack the last status block index seen by the driver, first
	 * with interrupts still masked, then again with the mask bit
	 * cleared to re-enable interrupt generation.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/*
	 * Force an immediate host-coalescing pass so any events that
	 * arrived while interrupts were masked are delivered now.
	 */
	val = REG_RD(sc, BCE_HC_COMMAND);
	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
}
4336 
4337 
4338 /****************************************************************************/
4339 /* Handles controller initialization.                                       */
4340 /*                                                                          */
4341 /* Must be called from a locked routine.                                    */
4342 /*                                                                          */
4343 /* Returns:                                                                 */
4344 /*   Nothing.                                                               */
4345 /****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	/* Quiesce the controller before reprogramming it. */
	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
		ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu |
			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/* NOTE(review): the "+ 2" and "+ 8" appear to be an alignment pad
	 * and slack space respectively -- confirm against the rx_bd setup. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		bce_disable_intr(sc);

		/* Use maximum coalescing trips while polling (1 << 16 sets
		 * the high half of the trip register). */
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	bce_ifmedia_upd_locked(ifp);

	/* Mark the interface up and kick off the periodic timer. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);

bce_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4449 
/*
 * Minimal initialization so that management firmware (ASF/IPMI/UMP)
 * traffic can flow while the interface itself remains down: starts
 * the on-chip CPUs and enables only the RX path blocks.
 */
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	u32 val;
	struct ifnet *ifp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_mgmt_init_locked_exit;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Program the host page size for the RV2P processor
	 * (presumably log2(page size) - 8 in bits 31:24 -- confirm
	 * against the BCE_RV2P_CONFIG register definition). */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Read back to flush the posted write, then let the blocks settle. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd_locked(ifp);
bce_mgmt_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4486 
4487 
4488 /****************************************************************************/
4489 /* Handles controller initialization when called from an unlocked routine.  */
4490 /*                                                                          */
4491 /* Returns:                                                                 */
4492 /*   Nothing.                                                               */
4493 /****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc;

	/* Recover the softc from the opaque callback argument. */
	sc = xsc;

	/* Serialize with the rest of the driver and do the real work. */
	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);
}
4503 
4504 
4505 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the*/
4507 /* memory visible to the controller.                                        */
4508 /*                                                                          */
4509 /* Returns:                                                                 */
4510 /*   0 for success, positive value for failure.                             */
4511 /****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	u16 vlan_tag = 0, flags = 0;
	u16 chain_prod, prod;
	u32 prod_bseq;

#ifdef BCE_DEBUG
	u16 debug_prod;
#endif
	int i, error, nsegs, rc = 0;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {

		/* Try to defrag the mbuf if there are too many segments. */
		/* NOTE(review): nsegs is only valid after a successful load;
		 * the value printed here may be stale on this failure path. */
	        DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
                    __FUNCTION__, nsegs);

                m0 = m_defrag(*m_head, M_DONTWAIT);
                if (m0 == NULL) {
			/* Defrag failed; the frame must be dropped. */
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
		    segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* The mbuf stays in *m_head so the caller can retry. */
			return (error);
		} else if (error != 0) {
			BCE_PRINTF(sc,
			    "%s(%d): Error mapping mbuf into TX chain!\n",
			    __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error == ENOMEM) {
		return (error);
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		return (error);
	}

	/*
	 * The chip seems to require that at least 16 descriptors be kept
	 * empty at all times.  Make sure we honor that.
	 * XXX Would it be faster to assume worst case scenario for nsegs
	 * and do this calculation higher up?
	 */
	if (nsegs > (USABLE_TX_BD - sc->used_tx_bd - BCE_TX_SLACK_SPACE)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		return (ENOBUFS);
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq  = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Descriptor fields are little-endian on the wire. */
		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return(rc);
}
4668 
4669 
4670 /****************************************************************************/
4671 /* Main transmit routine when called from another routine with a lock.      */
4672 /*                                                                          */
4673 /* Returns:                                                                 */
4674 /*   Nothing.                                                               */
4675 /****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.  We keep
	 * BCE_TX_SLACK_SPACE entries unused at all times.
	 */
	while (sc->used_tx_bd < USABLE_TX_BD - BCE_TX_SLACK_SPACE) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
				"TX chain is closed for business! Total tx_bd used = %d\n",
				sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* Start the transmit by handing the new producer index and byte
	 * sequence to the chip's mailbox registers. */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

bce_start_locked_exit:
	return;
}
4757 
4758 
4759 /****************************************************************************/
4760 /* Main transmit routine when called from another routine without a lock.   */
4761 /*                                                                          */
4762 /* Returns:                                                                 */
4763 /*   Nothing.                                                               */
4764 /****************************************************************************/
4765 static void
4766 bce_start(struct ifnet *ifp)
4767 {
4768 	struct bce_softc *sc = ifp->if_softc;
4769 
4770 	BCE_LOCK(sc);
4771 	bce_start_locked(ifp);
4772 	BCE_UNLOCK(sc);
4773 }
4774 
4775 
4776 /****************************************************************************/
4777 /* Handles any IOCTL calls from the operating system.                       */
4778 /*                                                                          */
4779 /* Returns:                                                                 */
4780 /*   0 for success, positive value for failure.                             */
4781 /****************************************************************************/
4782 static int
4783 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4784 {
4785 	struct bce_softc *sc = ifp->if_softc;
4786 	struct ifreq *ifr = (struct ifreq *) data;
4787 	struct mii_data *mii;
4788 	int mask, error = 0;
4789 
4790 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4791 
4792 	switch(command) {
4793 
4794 		/* Set the MTU. */
4795 		case SIOCSIFMTU:
4796 			/* Check that the MTU setting is supported. */
4797 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
4798 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4799 				error = EINVAL;
4800 				break;
4801 			}
4802 
4803 			DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4804 
4805 			BCE_LOCK(sc);
4806 			ifp->if_mtu = ifr->ifr_mtu;
4807 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4808 			bce_init_locked(sc);
4809 			BCE_UNLOCK(sc);
4810 			break;
4811 
4812 		/* Set interface. */
4813 		case SIOCSIFFLAGS:
4814 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4815 
4816 			BCE_LOCK(sc);
4817 
4818 			/* Check if the interface is up. */
4819 			if (ifp->if_flags & IFF_UP) {
4820 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4821 					/* Change the promiscuous/multicast flags as necessary. */
4822 					bce_set_rx_mode(sc);
4823 				} else {
4824 					/* Start the HW */
4825 					bce_init_locked(sc);
4826 				}
4827 			} else {
4828 				/* The interface is down.  Check if the driver is running. */
4829 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4830 					bce_stop(sc);
4831 				}
4832 			}
4833 
4834 			BCE_UNLOCK(sc);
4835 			error = 0;
4836 
4837 			break;
4838 
4839 		/* Add/Delete multicast address */
4840 		case SIOCADDMULTI:
4841 		case SIOCDELMULTI:
4842 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4843 
4844 			BCE_LOCK(sc);
4845 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4846 				bce_set_rx_mode(sc);
4847 				error = 0;
4848 			}
4849 			BCE_UNLOCK(sc);
4850 
4851 			break;
4852 
4853 		/* Set/Get Interface media */
4854 		case SIOCSIFMEDIA:
4855 		case SIOCGIFMEDIA:
4856 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4857 
4858 			DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4859 				sc->bce_phy_flags);
4860 
4861 			DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4862 			mii = device_get_softc(sc->bce_miibus);
4863 			error = ifmedia_ioctl(ifp, ifr,
4864 			    &mii->mii_media, command);
4865 			break;
4866 
4867 		/* Set interface capability */
4868 		case SIOCSIFCAP:
4869 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4870 			DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4871 
4872 #ifdef DEVICE_POLLING
4873 			if (mask & IFCAP_POLLING) {
4874 				if (ifr->ifr_reqcap & IFCAP_POLLING) {
4875 
4876 					/* Setup the poll routine to call. */
4877 					error = ether_poll_register(bce_poll, ifp);
4878 					if (error) {
4879 						BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4880 							__FILE__, __LINE__);
4881 						goto bce_ioctl_exit;
4882 					}
4883 
4884 					/* Clear the interrupt. */
4885 					BCE_LOCK(sc);
4886 					bce_disable_intr(sc);
4887 
4888 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4889 						(1 << 16) | sc->bce_rx_quick_cons_trip);
4890 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4891 						(1 << 16) | sc->bce_tx_quick_cons_trip);
4892 
4893 					ifp->if_capenable |= IFCAP_POLLING;
4894 					BCE_UNLOCK(sc);
4895 				} else {
4896 					/* Clear the poll routine. */
4897 					error = ether_poll_deregister(ifp);
4898 
4899 					/* Enable interrupt even in error case */
4900 					BCE_LOCK(sc);
4901 					bce_enable_intr(sc);
4902 
4903 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4904 						(sc->bce_tx_quick_cons_trip_int << 16) |
4905 						sc->bce_tx_quick_cons_trip);
4906 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4907 						(sc->bce_rx_quick_cons_trip_int << 16) |
4908 						sc->bce_rx_quick_cons_trip);
4909 
4910 					ifp->if_capenable &= ~IFCAP_POLLING;
4911 					BCE_UNLOCK(sc);
4912 				}
4913 			}
4914 #endif /*DEVICE_POLLING */
4915 
4916 			/* Toggle the TX checksum capabilites enable flag. */
4917 			if (mask & IFCAP_TXCSUM) {
4918 				ifp->if_capenable ^= IFCAP_TXCSUM;
4919 				if (IFCAP_TXCSUM & ifp->if_capenable)
4920 					ifp->if_hwassist = BCE_IF_HWASSIST;
4921 				else
4922 					ifp->if_hwassist = 0;
4923 			}
4924 
4925 			/* Toggle the RX checksum capabilities enable flag. */
4926 			if (mask & IFCAP_RXCSUM) {
4927 				ifp->if_capenable ^= IFCAP_RXCSUM;
4928 				if (IFCAP_RXCSUM & ifp->if_capenable)
4929 					ifp->if_hwassist = BCE_IF_HWASSIST;
4930 				else
4931 					ifp->if_hwassist = 0;
4932 			}
4933 
4934 			/* Toggle VLAN_MTU capabilities enable flag. */
4935 			if (mask & IFCAP_VLAN_MTU) {
4936 				BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
4937 					__FILE__, __LINE__);
4938 			}
4939 
4940 			/* Toggle VLANHWTAG capabilities enabled flag. */
4941 			if (mask & IFCAP_VLAN_HWTAGGING) {
4942 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
4943 					BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
4944 						"management firmware (ASF/IPMI/UMP) is running!\n",
4945 						__FILE__, __LINE__);
4946 				else
4947 					BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
4948 						__FILE__, __LINE__);
4949 			}
4950 
4951 			break;
4952 		default:
4953 			DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
4954 				(u32) command);
4955 
4956 			/* We don't know how to handle the IOCTL, pass it on. */
4957 			error = ether_ioctl(ifp, command, data);
4958 			break;
4959 	}
4960 
4961 #ifdef DEVICE_POLLING
4962 bce_ioctl_exit:
4963 #endif
4964 
4965 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4966 
4967 	return(error);
4968 }
4969 
4970 
4971 /****************************************************************************/
4972 /* Transmit timeout handler.                                                */
4973 /*                                                                          */
4974 /* Returns:                                                                 */
4975 /*   Nothing.                                                               */
4976 /****************************************************************************/
static void
bce_watchdog(struct bce_softc *sc)
{

	DBRUN(BCE_WARN_SEND,
		bce_dump_driver_state(sc);
		bce_dump_status_block(sc));

	BCE_LOCK_ASSERT(sc);

	/* Count the timer down; only act when it just reached zero. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		return;

	BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n",
		__FILE__, __LINE__);

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	/* Clear RUNNING so bce_init_locked() performs a full re-init. */
	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init_locked(sc);
	sc->bce_ifp->if_oerrors++;

}
5008 
5009 
5010 #ifdef DEVICE_POLLING
/*
 * Polling-mode service routine: processes any completed RX and TX
 * frames indicated by the status block, then restarts transmission.
 * NOTE(review): the `cmd` argument is ignored, so POLL_AND_CHECK_STATUS
 * gets no special handling (e.g. no link state check) -- confirm this
 * is intentional.
 */
static void
bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bce_softc *sc = ifp->if_softc;

	BCE_LOCK_ASSERT(sc);

	/* Remember the RX processing budget for this poll pass. */
	sc->bce_rxcycles = count;

	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/* Check for any completed RX frames. */
	if (sc->status_block->status_rx_quick_consumer_index0 !=
		sc->hw_rx_cons)
		bce_rx_intr(sc);

	/* Check for any completed TX frames. */
	if (sc->status_block->status_tx_quick_consumer_index0 !=
		sc->hw_tx_cons)
		bce_tx_intr(sc);

	/* Check for new frames to transmit. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

}
5038 
5039 
5040 static void
5041 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5042 {
5043 	struct bce_softc *sc = ifp->if_softc;
5044 
5045 	BCE_LOCK(sc);
5046 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5047 		bce_poll_locked(ifp, cmd, count);
5048 	BCE_UNLOCK(sc);
5049 }
5050 #endif /* DEVICE_POLLING */
5051 
5052 
#if 0
/*
 * Dead code (compiled out): would report whether the status block shows
 * pending RX/TX work or a link state change.
 *
 * NOTE(review): `bp->link_up` references an identifier that does not
 * exist in this driver (it appears carried over from the Linux bnx2
 * driver, where the softc is named `bp`); this would not compile if
 * the surrounding #if 0 were removed.
 */
static inline int
bce_has_work(struct bce_softc *sc)
{
	struct status_block *stat = sc->status_block;

	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
		return 1;

	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    bp->link_up)
		return 1;

	return 0;
}
#endif
5070 
5071 
5072 /*
5073  * Interrupt handler.
5074  */
5075 /****************************************************************************/
5076 /* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
5082 /****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;

	sc = xsc;
	ifp = sc->bce_ifp;

	BCE_LOCK(sc);

	DBRUNIF(1, sc->interrupts_generated++);

#ifdef DEVICE_POLLING
	/* Polling mode owns the device; ignore stray interrupts. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
		goto bce_intr_exit;
	}
#endif

	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF(sc, "Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt?  (Asserted bits differ
		 * from the acknowledged bits until serviced.) */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(1, sc->unexpected_attentions++);

			BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUN(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Reset the controller and bail out of the ISR. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag,	sc->status_map,
	    BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts: ack with the mask still set, then
	 * clear the mask. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}
5193 
5194 
5195 /****************************************************************************/
5196 /* Programs the various packet receive modes (broadcast and multicast).     */
5197 /*                                                                          */
5198 /* Returns:                                                                 */
5199 /*   Nothing.                                                               */
5200 /****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.  Keep VLAN tags only when hardware tagging is
	 * not compiled in and no management firmware is active.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
       	}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");

		/* Fold each multicast address into an 8-bit CRC hash and
		 * set the matching bit across the eight 32-bit registers. */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
			    hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}
5279 
5280 
5281 /****************************************************************************/
/* Called periodically to update statistics from the controller's           */
5283 /* statistics block.                                                        */
5284 /*                                                                          */
5285 /* Returns:                                                                 */
5286 /*   Nothing.                                                               */
5287 /****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	/*
	 * NOTE(review): assumes the controller has already DMA'd a coherent
	 * snapshot into sc->stats_block (no busdma sync is performed here) --
	 * confirm against the caller.
	 */
	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;

	/* Fold the individual receive error counters into if_ierrors. */
	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
				      (u_long) stats->stat_EtherStatsOverrsizePkts +
					  (u_long) stats->stat_IfInMBUFDiscards +
					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
					  (u_long) stats->stat_Dot3StatsFCSErrors;

	/* Fold the individual transmit error counters into if_oerrors. */
	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
					  (u_long) stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */
	/*
	 * The 64 bit hardware counters are exposed as separate high/low
	 * 32 bit words; recombine each pair into the softc mirror.
	 */
	sc->stat_IfHCInOctets =
		((u64) stats->stat_IfHCInOctets_hi << 32) +
		 (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((u64) stats->stat_IfHCOutOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32 bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5503 
5504 
5505 static void
5506 bce_tick(void *xsc)
5507 {
5508 	struct bce_softc *sc = xsc;
5509 	struct mii_data *mii = NULL;
5510 	struct ifnet *ifp;
5511 	u32 msg;
5512 
5513 	ifp = sc->bce_ifp;
5514 
5515 	BCE_LOCK_ASSERT(sc);
5516 
5517 	/* Tell the firmware that the driver is still running. */
5518 #ifdef BCE_DEBUG
5519 	msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5520 #else
5521 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5522 #endif
5523 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5524 
5525 	/* Update the statistics from the hardware statistics block. */
5526 	bce_stats_update(sc);
5527 
5528 	/* Check that chip hasn't hang. */
5529 	bce_watchdog(sc);
5530 
5531 	/* Schedule the next tick. */
5532 	callout_reset(
5533 		&sc->bce_stat_ch,			/* callout */
5534 		hz, 					/* ticks */
5535 		bce_tick, 				/* function */
5536 		sc);					/* function argument */
5537 
5538 	/* If link is up already up then we're done. */
5539 	if (sc->bce_link)
5540 		goto bce_tick_locked_exit;
5541 
5542 	mii = device_get_softc(sc->bce_miibus);
5543 	mii_tick(mii);
5544 
5545 	/* Check if the link has come up. */
5546 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5547 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5548 		sc->bce_link++;
5549 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5550 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5551 		    bootverbose)
5552 			BCE_PRINTF(sc, "Gigabit link up\n");
5553 		/* Now that link is up, handle any outstanding TX traffic. */
5554 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5555 			bce_start_locked(ifp);
5556 	}
5557 
5558 bce_tick_locked_exit:
5559 	return;
5560 }
5561 
5562 
5563 #ifdef BCE_DEBUG
5564 /****************************************************************************/
5565 /* Allows the driver state to be dumped through the sysctl interface.       */
5566 /*                                                                          */
5567 /* Returns:                                                                 */
5568 /*   0 for success, positive value for failure.                             */
5569 /****************************************************************************/
5570 static int
5571 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5572 {
5573         int error;
5574         int result;
5575         struct bce_softc *sc;
5576 
5577         result = -1;
5578         error = sysctl_handle_int(oidp, &result, 0, req);
5579 
5580         if (error || !req->newptr)
5581                 return (error);
5582 
5583         if (result == 1) {
5584                 sc = (struct bce_softc *)arg1;
5585                 bce_dump_driver_state(sc);
5586         }
5587 
5588         return error;
5589 }
5590 
5591 
5592 /****************************************************************************/
5593 /* Allows the hardware state to be dumped through the sysctl interface.     */
5594 /*                                                                          */
5595 /* Returns:                                                                 */
5596 /*   0 for success, positive value for failure.                             */
5597 /****************************************************************************/
5598 static int
5599 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5600 {
5601         int error;
5602         int result;
5603         struct bce_softc *sc;
5604 
5605         result = -1;
5606         error = sysctl_handle_int(oidp, &result, 0, req);
5607 
5608         if (error || !req->newptr)
5609                 return (error);
5610 
5611         if (result == 1) {
5612                 sc = (struct bce_softc *)arg1;
5613                 bce_dump_hw_state(sc);
5614         }
5615 
5616         return error;
5617 }
5618 
5619 
5620 /****************************************************************************/
5621 /*                                                                          */
5622 /*                                                                          */
5623 /* Returns:                                                                 */
5624 /*   0 for success, positive value for failure.                             */
5625 /****************************************************************************/
5626 static int
5627 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5628 {
5629         int error;
5630         int result;
5631         struct bce_softc *sc;
5632 
5633         result = -1;
5634         error = sysctl_handle_int(oidp, &result, 0, req);
5635 
5636         if (error || !req->newptr)
5637                 return (error);
5638 
5639         if (result == 1) {
5640                 sc = (struct bce_softc *)arg1;
5641                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5642         }
5643 
5644         return error;
5645 }
5646 
5647 
5648 /****************************************************************************/
5649 /*                                                                          */
5650 /*                                                                          */
5651 /* Returns:                                                                 */
5652 /*   0 for success, positive value for failure.                             */
5653 /****************************************************************************/
5654 static int
5655 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5656 {
5657         int error;
5658         int result;
5659         struct bce_softc *sc;
5660 
5661         result = -1;
5662         error = sysctl_handle_int(oidp, &result, 0, req);
5663 
5664         if (error || !req->newptr)
5665                 return (error);
5666 
5667         if (result == 1) {
5668                 sc = (struct bce_softc *)arg1;
5669                 bce_breakpoint(sc);
5670         }
5671 
5672         return error;
5673 }
5674 #endif
5675 
5676 
5677 /****************************************************************************/
5678 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5679 /*                                                                          */
5680 /* Returns:                                                                 */
5681 /*   0 for success, positive value for failure.                             */
5682 /****************************************************************************/
5683 static void
5684 bce_add_sysctls(struct bce_softc *sc)
5685 {
5686 	struct sysctl_ctx_list *ctx;
5687 	struct sysctl_oid_list *children;
5688 
5689 	ctx = device_get_sysctl_ctx(sc->bce_dev);
5690 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5691 
5692 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5693 		"driver_version",
5694 		CTLFLAG_RD, &bce_driver_version,
5695 		0, "bce driver version");
5696 
5697 #ifdef BCE_DEBUG
5698 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5699 		"rx_low_watermark",
5700 		CTLFLAG_RD, &sc->rx_low_watermark,
5701 		0, "Lowest level of free rx_bd's");
5702 
5703 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5704 		"tx_hi_watermark",
5705 		CTLFLAG_RD, &sc->tx_hi_watermark,
5706 		0, "Highest level of used tx_bd's");
5707 
5708 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5709 		"l2fhdr_status_errors",
5710 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5711 		0, "l2_fhdr status errors");
5712 
5713 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5714 		"unexpected_attentions",
5715 		CTLFLAG_RD, &sc->unexpected_attentions,
5716 		0, "unexpected attentions");
5717 
5718 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5719 		"lost_status_block_updates",
5720 		CTLFLAG_RD, &sc->lost_status_block_updates,
5721 		0, "lost status block updates");
5722 
5723 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5724 		"mbuf_alloc_failed",
5725 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5726 		0, "mbuf cluster allocation failures");
5727 #endif
5728 
5729 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5730 		"stat_IfHcInOctets",
5731 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5732 		"Bytes received");
5733 
5734 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5735 		"stat_IfHCInBadOctets",
5736 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5737 		"Bad bytes received");
5738 
5739 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5740 		"stat_IfHCOutOctets",
5741 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5742 		"Bytes sent");
5743 
5744 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5745 		"stat_IfHCOutBadOctets",
5746 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5747 		"Bad bytes sent");
5748 
5749 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5750 		"stat_IfHCInUcastPkts",
5751 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5752 		"Unicast packets received");
5753 
5754 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5755 		"stat_IfHCInMulticastPkts",
5756 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5757 		"Multicast packets received");
5758 
5759 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5760 		"stat_IfHCInBroadcastPkts",
5761 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5762 		"Broadcast packets received");
5763 
5764 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5765 		"stat_IfHCOutUcastPkts",
5766 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5767 		"Unicast packets sent");
5768 
5769 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5770 		"stat_IfHCOutMulticastPkts",
5771 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5772 		"Multicast packets sent");
5773 
5774 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5775 		"stat_IfHCOutBroadcastPkts",
5776 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5777 		"Broadcast packets sent");
5778 
5779 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5780 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5781 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5782 		0, "Internal MAC transmit errors");
5783 
5784 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5785 		"stat_Dot3StatsCarrierSenseErrors",
5786 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5787 		0, "Carrier sense errors");
5788 
5789 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5790 		"stat_Dot3StatsFCSErrors",
5791 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5792 		0, "Frame check sequence errors");
5793 
5794 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5795 		"stat_Dot3StatsAlignmentErrors",
5796 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5797 		0, "Alignment errors");
5798 
5799 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5800 		"stat_Dot3StatsSingleCollisionFrames",
5801 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5802 		0, "Single Collision Frames");
5803 
5804 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5805 		"stat_Dot3StatsMultipleCollisionFrames",
5806 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5807 		0, "Multiple Collision Frames");
5808 
5809 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5810 		"stat_Dot3StatsDeferredTransmissions",
5811 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5812 		0, "Deferred Transmissions");
5813 
5814 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5815 		"stat_Dot3StatsExcessiveCollisions",
5816 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5817 		0, "Excessive Collisions");
5818 
5819 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5820 		"stat_Dot3StatsLateCollisions",
5821 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5822 		0, "Late Collisions");
5823 
5824 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5825 		"stat_EtherStatsCollisions",
5826 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5827 		0, "Collisions");
5828 
5829 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5830 		"stat_EtherStatsFragments",
5831 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5832 		0, "Fragments");
5833 
5834 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5835 		"stat_EtherStatsJabbers",
5836 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5837 		0, "Jabbers");
5838 
5839 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5840 		"stat_EtherStatsUndersizePkts",
5841 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5842 		0, "Undersize packets");
5843 
5844 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5845 		"stat_EtherStatsOverrsizePkts",
5846 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5847 		0, "stat_EtherStatsOverrsizePkts");
5848 
5849 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5850 		"stat_EtherStatsPktsRx64Octets",
5851 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5852 		0, "Bytes received in 64 byte packets");
5853 
5854 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5855 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5856 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5857 		0, "Bytes received in 65 to 127 byte packets");
5858 
5859 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5860 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5861 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5862 		0, "Bytes received in 128 to 255 byte packets");
5863 
5864 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5865 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5866 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5867 		0, "Bytes received in 256 to 511 byte packets");
5868 
5869 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5870 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5871 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5872 		0, "Bytes received in 512 to 1023 byte packets");
5873 
5874 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5875 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5876 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5877 		0, "Bytes received in 1024 t0 1522 byte packets");
5878 
5879 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5880 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5881 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5882 		0, "Bytes received in 1523 to 9022 byte packets");
5883 
5884 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5885 		"stat_EtherStatsPktsTx64Octets",
5886 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5887 		0, "Bytes sent in 64 byte packets");
5888 
5889 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5890 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5891 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5892 		0, "Bytes sent in 65 to 127 byte packets");
5893 
5894 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5895 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5896 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5897 		0, "Bytes sent in 128 to 255 byte packets");
5898 
5899 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5900 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5901 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5902 		0, "Bytes sent in 256 to 511 byte packets");
5903 
5904 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5905 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5906 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5907 		0, "Bytes sent in 512 to 1023 byte packets");
5908 
5909 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5910 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5911 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5912 		0, "Bytes sent in 1024 to 1522 byte packets");
5913 
5914 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5915 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5916 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5917 		0, "Bytes sent in 1523 to 9022 byte packets");
5918 
5919 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5920 		"stat_XonPauseFramesReceived",
5921 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5922 		0, "XON pause frames receved");
5923 
5924 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5925 		"stat_XoffPauseFramesReceived",
5926 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5927 		0, "XOFF pause frames received");
5928 
5929 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5930 		"stat_OutXonSent",
5931 		CTLFLAG_RD, &sc->stat_OutXonSent,
5932 		0, "XON pause frames sent");
5933 
5934 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5935 		"stat_OutXoffSent",
5936 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5937 		0, "XOFF pause frames sent");
5938 
5939 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5940 		"stat_FlowControlDone",
5941 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5942 		0, "Flow control done");
5943 
5944 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5945 		"stat_MacControlFramesReceived",
5946 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5947 		0, "MAC control frames received");
5948 
5949 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5950 		"stat_XoffStateEntered",
5951 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5952 		0, "XOFF state entered");
5953 
5954 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5955 		"stat_IfInFramesL2FilterDiscards",
5956 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5957 		0, "Received L2 packets discarded");
5958 
5959 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5960 		"stat_IfInRuleCheckerDiscards",
5961 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5962 		0, "Received packets discarded by rule");
5963 
5964 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5965 		"stat_IfInFTQDiscards",
5966 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5967 		0, "Received packet FTQ discards");
5968 
5969 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5970 		"stat_IfInMBUFDiscards",
5971 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5972 		0, "Received packets discarded due to lack of controller buffer memory");
5973 
5974 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5975 		"stat_IfInRuleCheckerP4Hit",
5976 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
5977 		0, "Received packets rule checker hits");
5978 
5979 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5980 		"stat_CatchupInRuleCheckerDiscards",
5981 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
5982 		0, "Received packets discarded in Catchup path");
5983 
5984 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5985 		"stat_CatchupInFTQDiscards",
5986 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
5987 		0, "Received packets discarded in FTQ in Catchup path");
5988 
5989 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5990 		"stat_CatchupInMBUFDiscards",
5991 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
5992 		0, "Received packets discarded in controller buffer memory in Catchup path");
5993 
5994 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5995 		"stat_CatchupInRuleCheckerP4Hit",
5996 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
5997 		0, "Received packets rule checker hits in Catchup path");
5998 
5999 #ifdef BCE_DEBUG
6000 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6001 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6002 		(void *)sc, 0,
6003 		bce_sysctl_driver_state, "I", "Drive state information");
6004 
6005 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6006 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
6007 		(void *)sc, 0,
6008 		bce_sysctl_hw_state, "I", "Hardware state information");
6009 
6010 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6011 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6012 		(void *)sc, 0,
6013 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6014 
6015 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6016 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6017 		(void *)sc, 0,
6018 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
6019 #endif
6020 
6021 }
6022 
6023 
6024 /****************************************************************************/
6025 /* BCE Debug Routines                                                       */
6026 /****************************************************************************/
6027 #ifdef BCE_DEBUG
6028 
6029 /****************************************************************************/
6030 /* Prints out information about an mbuf.                                    */
6031 /*                                                                          */
6032 /* Returns:                                                                 */
6033 /*   Nothing.                                                               */
6034 /****************************************************************************/
6035 static void
6036 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6037 {
6038 	u32 val_hi, val_lo;
6039 	struct mbuf *mp = m;
6040 
6041 	if (m == NULL) {
6042 		/* Index out of range. */
6043 		printf("mbuf ptr is null!\n");
6044 		return;
6045 	}
6046 
6047 	while (mp) {
6048 		val_hi = BCE_ADDR_HI(mp);
6049 		val_lo = BCE_ADDR_LO(mp);
6050 		BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
6051 			   val_hi, val_lo, mp->m_len);
6052 
6053 		if (mp->m_flags & M_EXT)
6054 			printf("M_EXT ");
6055 		if (mp->m_flags & M_PKTHDR)
6056 			printf("M_PKTHDR ");
6057 		printf("\n");
6058 
6059 		if (mp->m_flags & M_EXT) {
6060 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6061 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6062 			BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
6063 				val_hi, val_lo, mp->m_ext.ext_size);
6064 		}
6065 
6066 		mp = mp->m_next;
6067 	}
6068 
6069 
6070 }
6071 
6072 
6073 /****************************************************************************/
6074 /* Prints out the mbufs in the TX mbuf chain.                               */
6075 /*                                                                          */
6076 /* Returns:                                                                 */
6077 /*   Nothing.                                                               */
6078 /****************************************************************************/
6079 static void
6080 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6081 {
6082 	struct mbuf *m;
6083 
6084 	BCE_PRINTF(sc,
6085 		"----------------------------"
6086 		"  tx mbuf data  "
6087 		"----------------------------\n");
6088 
6089 	for (int i = 0; i < count; i++) {
6090 	 	m = sc->tx_mbuf_ptr[chain_prod];
6091 		BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6092 		bce_dump_mbuf(sc, m);
6093 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6094 	}
6095 
6096 	BCE_PRINTF(sc,
6097 		"----------------------------"
6098 		"----------------"
6099 		"----------------------------\n");
6100 }
6101 
6102 
6103 /*
6104  * This routine prints the RX mbuf chain.
6105  */
6106 static void
6107 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6108 {
6109 	struct mbuf *m;
6110 
6111 	BCE_PRINTF(sc,
6112 		"----------------------------"
6113 		"  rx mbuf data  "
6114 		"----------------------------\n");
6115 
6116 	for (int i = 0; i < count; i++) {
6117 	 	m = sc->rx_mbuf_ptr[chain_prod];
6118 		BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6119 		bce_dump_mbuf(sc, m);
6120 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6121 	}
6122 
6123 
6124 	BCE_PRINTF(sc,
6125 		"----------------------------"
6126 		"----------------"
6127 		"----------------------------\n");
6128 }
6129 
6130 
6131 static void
6132 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6133 {
6134 	if (idx > MAX_TX_BD)
6135 		/* Index out of range. */
6136 		BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6137 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6138 		/* TX Chain page pointer. */
6139 		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6140 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6141 	else
6142 		/* Normal tx_bd entry. */
6143 		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6144 			"vlan tag= 0x%4X, flags = 0x%04X\n", idx,
6145 			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6146 			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
6147 			txbd->tx_bd_flags);
6148 }
6149 
6150 
/* Print a single rx_bd entry, distinguishing chain-page pointers. */
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer (comment previously said "TX"). */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry (comment previously said "tx_bd"). */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
6168 
6169 
6170 static void
6171 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6172 {
6173 	BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6174 		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6175 		"tcp_udp_xsum = 0x%04X\n", idx,
6176 		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6177 		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6178 		l2fhdr->l2_fhdr_tcp_udp_xsum);
6179 }
6180 
6181 
6182 /*
6183  * This routine prints the TX chain.
6184  */
6185 static void
6186 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6187 {
6188 	struct tx_bd *txbd;
6189 
6190 	/* First some info about the tx_bd chain structure. */
6191 	BCE_PRINTF(sc,
6192 		"----------------------------"
6193 		"  tx_bd  chain  "
6194 		"----------------------------\n");
6195 
6196 	BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6197 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6198 
6199 	BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6200 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6201 
6202 	BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6203 
6204 	BCE_PRINTF(sc, ""
6205 		"-----------------------------"
6206 		"   tx_bd data   "
6207 		"-----------------------------\n");
6208 
6209 	/* Now print out the tx_bd's themselves. */
6210 	for (int i = 0; i < count; i++) {
6211 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6212 		bce_dump_txbd(sc, tx_prod, txbd);
6213 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6214 	}
6215 
6216 	BCE_PRINTF(sc,
6217 		"-----------------------------"
6218 		"--------------"
6219 		"-----------------------------\n");
6220 }
6221 
6222 
6223 /*
6224  * This routine prints the RX chain.
6225  */
6226 static void
6227 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6228 {
6229 	struct rx_bd *rxbd;
6230 
6231 	/* First some info about the tx_bd chain structure. */
6232 	BCE_PRINTF(sc,
6233 		"----------------------------"
6234 		"  rx_bd  chain  "
6235 		"----------------------------\n");
6236 
6237 	BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6238 
6239 	BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6240 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6241 
6242 	BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6243 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6244 
6245 	BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6246 
6247 	BCE_PRINTF(sc,
6248 		"----------------------------"
6249 		"   rx_bd data   "
6250 		"----------------------------\n");
6251 
6252 	/* Now print out the rx_bd's themselves. */
6253 	for (int i = 0; i < count; i++) {
6254 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6255 		bce_dump_rxbd(sc, rx_prod, rxbd);
6256 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6257 	}
6258 
6259 	BCE_PRINTF(sc,
6260 		"----------------------------"
6261 		"--------------"
6262 		"----------------------------\n");
6263 }
6264 
6265 
6266 /*
6267  * This routine prints the status block.
6268  */
6269 static void
6270 bce_dump_status_block(struct bce_softc *sc)
6271 {
6272 	struct status_block *sblk;
6273 
6274 	sblk = sc->status_block;
6275 
6276    	BCE_PRINTF(sc, "----------------------------- Status Block "
6277 		"-----------------------------\n");
6278 
6279 	BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6280 		sblk->status_attn_bits, sblk->status_attn_bits_ack,
6281 		sblk->status_idx);
6282 
6283 	BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
6284 		sblk->status_rx_quick_consumer_index0,
6285 		sblk->status_tx_quick_consumer_index0);
6286 
6287 	BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6288 
6289 	/* Theses indices are not used for normal L2 drivers. */
6290 	if (sblk->status_rx_quick_consumer_index1 ||
6291 		sblk->status_tx_quick_consumer_index1)
6292 		BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
6293 			sblk->status_rx_quick_consumer_index1,
6294 			sblk->status_tx_quick_consumer_index1);
6295 
6296 	if (sblk->status_rx_quick_consumer_index2 ||
6297 		sblk->status_tx_quick_consumer_index2)
6298 		BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
6299 			sblk->status_rx_quick_consumer_index2,
6300 			sblk->status_tx_quick_consumer_index2);
6301 
6302 	if (sblk->status_rx_quick_consumer_index3 ||
6303 		sblk->status_tx_quick_consumer_index3)
6304 		BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
6305 			sblk->status_rx_quick_consumer_index3,
6306 			sblk->status_tx_quick_consumer_index3);
6307 
6308 	if (sblk->status_rx_quick_consumer_index4 ||
6309 		sblk->status_rx_quick_consumer_index5)
6310 		BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6311 			sblk->status_rx_quick_consumer_index4,
6312 			sblk->status_rx_quick_consumer_index5);
6313 
6314 	if (sblk->status_rx_quick_consumer_index6 ||
6315 		sblk->status_rx_quick_consumer_index7)
6316 		BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6317 			sblk->status_rx_quick_consumer_index6,
6318 			sblk->status_rx_quick_consumer_index7);
6319 
6320 	if (sblk->status_rx_quick_consumer_index8 ||
6321 		sblk->status_rx_quick_consumer_index9)
6322 		BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6323 			sblk->status_rx_quick_consumer_index8,
6324 			sblk->status_rx_quick_consumer_index9);
6325 
6326 	if (sblk->status_rx_quick_consumer_index10 ||
6327 		sblk->status_rx_quick_consumer_index11)
6328 		BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6329 			sblk->status_rx_quick_consumer_index10,
6330 			sblk->status_rx_quick_consumer_index11);
6331 
6332 	if (sblk->status_rx_quick_consumer_index12 ||
6333 		sblk->status_rx_quick_consumer_index13)
6334 		BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6335 			sblk->status_rx_quick_consumer_index12,
6336 			sblk->status_rx_quick_consumer_index13);
6337 
6338 	if (sblk->status_rx_quick_consumer_index14 ||
6339 		sblk->status_rx_quick_consumer_index15)
6340 		BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6341 			sblk->status_rx_quick_consumer_index14,
6342 			sblk->status_rx_quick_consumer_index15);
6343 
6344 	if (sblk->status_completion_producer_index ||
6345 		sblk->status_cmd_consumer_index)
6346 		BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6347 			sblk->status_completion_producer_index,
6348 			sblk->status_cmd_consumer_index);
6349 
6350 	BCE_PRINTF(sc, "-------------------------------------------"
6351 		"-----------------------------\n");
6352 }
6353 
6354 
6355 /*
6356  * This routine prints the statistics block.
6357  */
6358 static void
6359 bce_dump_stats_block(struct bce_softc *sc)
6360 {
6361 	struct statistics_block *sblk;
6362 
6363 	sblk = sc->stats_block;
6364 
6365 	BCE_PRINTF(sc, ""
6366 		"-----------------------------"
6367 		" Stats  Block "
6368 		"-----------------------------\n");
6369 
6370 	BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
6371 		"IfHcInBadOctets      = 0x%08X:%08X\n",
6372 		sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6373 		sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6374 
6375 	BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
6376 		"IfHcOutBadOctets     = 0x%08X:%08X\n",
6377 		sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6378 		sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6379 
6380 	BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
6381 		"IfHcInMulticastPkts  = 0x%08X:%08X\n",
6382 		sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6383 		sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);
6384 
6385 	BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
6386 		"IfHcOutUcastPkts     = 0x%08X:%08X\n",
6387 		sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
6388 		sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);
6389 
6390 	BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6391 		sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
6392 		sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);
6393 
6394 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6395 		BCE_PRINTF(sc, "0x%08X : "
6396 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6397 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6398 
6399 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
6400 		BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6401 			sblk->stat_Dot3StatsCarrierSenseErrors);
6402 
6403 	if (sblk->stat_Dot3StatsFCSErrors)
6404 		BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6405 			sblk->stat_Dot3StatsFCSErrors);
6406 
6407 	if (sblk->stat_Dot3StatsAlignmentErrors)
6408 		BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6409 			sblk->stat_Dot3StatsAlignmentErrors);
6410 
6411 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
6412 		BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6413 			sblk->stat_Dot3StatsSingleCollisionFrames);
6414 
6415 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6416 		BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6417 			sblk->stat_Dot3StatsMultipleCollisionFrames);
6418 
6419 	if (sblk->stat_Dot3StatsDeferredTransmissions)
6420 		BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6421 			sblk->stat_Dot3StatsDeferredTransmissions);
6422 
6423 	if (sblk->stat_Dot3StatsExcessiveCollisions)
6424 		BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6425 			sblk->stat_Dot3StatsExcessiveCollisions);
6426 
6427 	if (sblk->stat_Dot3StatsLateCollisions)
6428 		BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6429 			sblk->stat_Dot3StatsLateCollisions);
6430 
6431 	if (sblk->stat_EtherStatsCollisions)
6432 		BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6433 			sblk->stat_EtherStatsCollisions);
6434 
6435 	if (sblk->stat_EtherStatsFragments)
6436 		BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6437 			sblk->stat_EtherStatsFragments);
6438 
6439 	if (sblk->stat_EtherStatsJabbers)
6440 		BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6441 			sblk->stat_EtherStatsJabbers);
6442 
6443 	if (sblk->stat_EtherStatsUndersizePkts)
6444 		BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6445 			sblk->stat_EtherStatsUndersizePkts);
6446 
6447 	if (sblk->stat_EtherStatsOverrsizePkts)
6448 		BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6449 			sblk->stat_EtherStatsOverrsizePkts);
6450 
6451 	if (sblk->stat_EtherStatsPktsRx64Octets)
6452 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6453 			sblk->stat_EtherStatsPktsRx64Octets);
6454 
6455 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6456 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6457 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6458 
6459 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6460 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
6461 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6462 
6463 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6464 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
6465 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6466 
6467 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6468 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
6469 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6470 
6471 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6472 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
6473 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6474 
6475 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6476 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
6477 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6478 
6479 	if (sblk->stat_EtherStatsPktsTx64Octets)
6480 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6481 			sblk->stat_EtherStatsPktsTx64Octets);
6482 
6483 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6484 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6485 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6486 
6487 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6488 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
6489 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6490 
6491 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6492 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
6493 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6494 
6495 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6496 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
6497 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6498 
6499 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6500 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
6501 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6502 
6503 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6504 		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
6505 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6506 
6507 	if (sblk->stat_XonPauseFramesReceived)
6508 		BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6509 			sblk->stat_XonPauseFramesReceived);
6510 
6511 	if (sblk->stat_XoffPauseFramesReceived)
6512 	   BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6513 			sblk->stat_XoffPauseFramesReceived);
6514 
6515 	if (sblk->stat_OutXonSent)
6516 		BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
6517 			sblk->stat_OutXonSent);
6518 
6519 	if (sblk->stat_OutXoffSent)
6520 		BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
6521 			sblk->stat_OutXoffSent);
6522 
6523 	if (sblk->stat_FlowControlDone)
6524 		BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
6525 			sblk->stat_FlowControlDone);
6526 
6527 	if (sblk->stat_MacControlFramesReceived)
6528 		BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6529 			sblk->stat_MacControlFramesReceived);
6530 
6531 	if (sblk->stat_XoffStateEntered)
6532 		BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6533 			sblk->stat_XoffStateEntered);
6534 
6535 	if (sblk->stat_IfInFramesL2FilterDiscards)
6536 		BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6537 			sblk->stat_IfInFramesL2FilterDiscards);
6538 
6539 	if (sblk->stat_IfInRuleCheckerDiscards)
6540 		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6541 			sblk->stat_IfInRuleCheckerDiscards);
6542 
6543 	if (sblk->stat_IfInFTQDiscards)
6544 		BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6545 			sblk->stat_IfInFTQDiscards);
6546 
6547 	if (sblk->stat_IfInMBUFDiscards)
6548 		BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6549 			sblk->stat_IfInMBUFDiscards);
6550 
6551 	if (sblk->stat_IfInRuleCheckerP4Hit)
6552 		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6553 			sblk->stat_IfInRuleCheckerP4Hit);
6554 
6555 	if (sblk->stat_CatchupInRuleCheckerDiscards)
6556 		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6557 			sblk->stat_CatchupInRuleCheckerDiscards);
6558 
6559 	if (sblk->stat_CatchupInFTQDiscards)
6560 		BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6561 			sblk->stat_CatchupInFTQDiscards);
6562 
6563 	if (sblk->stat_CatchupInMBUFDiscards)
6564 		BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6565 			sblk->stat_CatchupInMBUFDiscards);
6566 
6567 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
6568 		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6569 			sblk->stat_CatchupInRuleCheckerP4Hit);
6570 
6571 	BCE_PRINTF(sc,
6572 		"-----------------------------"
6573 		"--------------"
6574 		"-----------------------------\n");
6575 }
6576 
6577 
6578 static void
6579 bce_dump_driver_state(struct bce_softc *sc)
6580 {
6581 	u32 val_hi, val_lo;
6582 
6583 	BCE_PRINTF(sc,
6584 		"-----------------------------"
6585 		" Driver State "
6586 		"-----------------------------\n");
6587 
6588 	val_hi = BCE_ADDR_HI(sc);
6589 	val_lo = BCE_ADDR_LO(sc);
6590 	BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6591 		val_hi, val_lo);
6592 
6593 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6594 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6595 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6596 		val_hi, val_lo);
6597 
6598 	val_hi = BCE_ADDR_HI(sc->status_block);
6599 	val_lo = BCE_ADDR_LO(sc->status_block);
6600 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6601 		val_hi, val_lo);
6602 
6603 	val_hi = BCE_ADDR_HI(sc->stats_block);
6604 	val_lo = BCE_ADDR_LO(sc->stats_block);
6605 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6606 		val_hi, val_lo);
6607 
6608 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6609 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6610 	BCE_PRINTF(sc,
6611 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
6612 		val_hi, val_lo);
6613 
6614 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6615 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6616 	BCE_PRINTF(sc,
6617 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6618 		val_hi, val_lo);
6619 
6620 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6621 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6622 	BCE_PRINTF(sc,
6623 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6624 		val_hi, val_lo);
6625 
6626 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6627 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6628 	BCE_PRINTF(sc,
6629 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6630 		val_hi, val_lo);
6631 
6632 	BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6633 		sc->interrupts_generated);
6634 
6635 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6636 		sc->rx_interrupts);
6637 
6638 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6639 		sc->tx_interrupts);
6640 
6641 	BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6642 		sc->last_status_idx);
6643 
6644 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6645 		sc->tx_prod);
6646 
6647 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6648 		sc->tx_cons);
6649 
6650 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6651 		sc->tx_prod_bseq);
6652 
6653 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6654 		sc->rx_prod);
6655 
6656 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6657 		sc->rx_cons);
6658 
6659 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6660 		sc->rx_prod_bseq);
6661 
6662 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6663 		sc->rx_mbuf_alloc);
6664 
6665 	BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6666 		sc->free_rx_bd);
6667 
6668 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6669 		sc->rx_low_watermark, (u32) USABLE_RX_BD);
6670 
6671 	BCE_PRINTF(sc, "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
6672 		sc->tx_mbuf_alloc);
6673 
6674 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6675 		sc->rx_mbuf_alloc);
6676 
6677 	BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6678 		sc->used_tx_bd);
6679 
6680 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6681 		sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6682 
6683 	BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6684 		sc->mbuf_alloc_failed);
6685 
6686 	BCE_PRINTF(sc,
6687 		"-----------------------------"
6688 		"--------------"
6689 		"-----------------------------\n");
6690 }
6691 
6692 
6693 static void
6694 bce_dump_hw_state(struct bce_softc *sc)
6695 {
6696 	u32 val1;
6697 
6698 	BCE_PRINTF(sc,
6699 		"----------------------------"
6700 		" Hardware State "
6701 		"----------------------------\n");
6702 
6703 	BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6704 
6705 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6706 	BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6707 		val1, BCE_MISC_ENABLE_STATUS_BITS);
6708 
6709 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6710 	BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6711 
6712 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6713 	BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6714 
6715 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6716 	BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6717 
6718 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6719 	BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6720 
6721 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6722 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6723 
6724 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6725 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6726 
6727 	val1 = REG_RD(sc, BCE_HC_STATUS);
6728 	BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6729 
6730 	BCE_PRINTF(sc,
6731 		"----------------------------"
6732 		"----------------"
6733 		"----------------------------\n");
6734 
6735 	BCE_PRINTF(sc,
6736 		"----------------------------"
6737 		" Register  Dump "
6738 		"----------------------------\n");
6739 
6740 	for (int i = 0x400; i < 0x8000; i += 0x10)
6741 		BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6742 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6743 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6744 
6745 	BCE_PRINTF(sc,
6746 		"----------------------------"
6747 		"----------------"
6748 		"----------------------------\n");
6749 }
6750 
6751 
6752 static void
6753 bce_breakpoint(struct bce_softc *sc)
6754 {
6755 
6756 	/* Unreachable code to shut the compiler up about unused functions. */
6757 	if (0) {
6758    		bce_dump_txbd(sc, 0, NULL);
6759 		bce_dump_rxbd(sc, 0, NULL);
6760 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6761 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6762 		bce_dump_l2fhdr(sc, 0, NULL);
6763 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6764 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6765 		bce_dump_status_block(sc);
6766 		bce_dump_stats_block(sc);
6767 		bce_dump_driver_state(sc);
6768 		bce_dump_hw_state(sc);
6769 	}
6770 
6771 	bce_dump_driver_state(sc);
6772 	/* Print the important status block fields. */
6773 	bce_dump_status_block(sc);
6774 
6775 	/* Call the debugger. */
6776 	breakpoint();
6777 
6778 	return;
6779 }
6780 #endif
6781