xref: /freebsd/sys/dev/bce/if_bce.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Copyright (c) 2006 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1
38  *
39  * The following controllers are not supported by this driver:
40  * (These are not "Production" versions of the controller.)
41  *
42  *   BCM5706C A0, A1
43  *   BCM5706S A0, A1, A2, A3
44  *   BCM5708C A0, B0
45  *   BCM5708S A0, B0, B1
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Driver Version                                                       */
55 /****************************************************************************/
56 char bce_driver_version[] = "v0.9.6";
57 
58 
59 /****************************************************************************/
60 /* BCE Debug Options                                                        */
61 /****************************************************************************/
62 #ifdef BCE_DEBUG
63 	u32 bce_debug = BCE_WARN;
64 
65 	/*          0 = Never              */
66 	/*          1 = 1 in 2,147,483,648 */
67 	/*        256 = 1 in     8,388,608 */
68 	/*       2048 = 1 in     1,048,576 */
69 	/*      65536 = 1 in        32,768 */
70 	/*    1048576 = 1 in         2,048 */
71 	/*  268435456 =	1 in             8 */
72 	/*  536870912 = 1 in             4 */
73 	/* 1073741824 = 1 in             2 */
74 
75 	/* Controls how often the l2_fhdr frame error check will fail. */
76 	int bce_debug_l2fhdr_status_check = 0;
77 
78 	/* Controls how often the unexpected attention check will fail. */
79 	int bce_debug_unexpected_attention = 0;
80 
81 	/* Controls how often to simulate an mbuf allocation failure. */
82 	int bce_debug_mbuf_allocation_failure = 0;
83 
84 	/* Controls how often to simulate a DMA mapping failure. */
85 	int bce_debug_dma_map_addr_failure = 0;
86 
87 	/* Controls how often to simulate a bootcode failure. */
88 	int bce_debug_bootcode_running_failure = 0;
89 #endif
90 
91 
92 /****************************************************************************/
93 /* PCI Device ID Table                                                      */
94 /*                                                                          */
95 /* Used by bce_probe() to identify the devices supported by this driver.    */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX		64
98 
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	/*
	 * NOTE(review): this entry reuses BRCM_DEVICEID_BCM5708 and the
	 * 5708C "1000Base-T" description, making it an exact duplicate of
	 * the wildcard entry above — bce_probe() will always match the
	 * earlier entry first, so this one is unreachable.  It presumably
	 * should use a distinct BCM5708S device ID (and a "1000Base-SX"
	 * description, mirroring the 5706/5706S pair) — confirm against
	 * the device ID definitions in if_bcereg.h before changing.
	 */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ 0, 0, 0, 0, NULL }
};
123 
124 
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data.                                       */
127 /****************************************************************************/
static struct flash_spec flash_table[] =
{
	/*
	 * Each entry describes one supported NVRAM part.  Based on usage
	 * in this table, the fields appear to be: five 32-bit controller
	 * configuration words, a buffered-access flag (1 = buffered
	 * EEPROM/flash, 0 = non-buffered), the page geometry (bits and
	 * size), the in-page byte address mask, the total device size in
	 * bytes (0 for unprobed "expansion" placeholders), and a
	 * human-readable name.  Confirm against struct flash_spec in
	 * if_bcereg.h before relying on this legend.
	 */
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
214 
215 
216 /****************************************************************************/
217 /* FreeBSD device entry points.                                             */
218 /****************************************************************************/
219 static int  bce_probe				(device_t);
220 static int  bce_attach				(device_t);
221 static int  bce_detach				(device_t);
222 static void bce_shutdown			(device_t);
223 
224 
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines                                   */
227 /****************************************************************************/
228 #ifdef BCE_DEBUG
229 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, int, int);
232 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain		(struct bce_softc *, int, int);
236 static void bce_dump_rx_chain		(struct bce_softc *, int, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_breakpoint			(struct bce_softc *);
242 #endif
243 
244 
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines                                      */
247 /****************************************************************************/
248 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
249 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
250 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
251 static int  bce_miibus_read_reg		(device_t, int, int);
252 static int  bce_miibus_write_reg	(device_t, int, int, int);
253 static void bce_miibus_statchg		(device_t);
254 
255 
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines                                                */
258 /****************************************************************************/
259 static int  bce_acquire_nvram_lock	(struct bce_softc *);
260 static int  bce_release_nvram_lock	(struct bce_softc *);
261 static void bce_enable_nvram_access	(struct bce_softc *);
262 static void	bce_disable_nvram_access(struct bce_softc *);
263 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
264 static int  bce_init_nvram			(struct bce_softc *);
265 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
266 static int  bce_nvram_test			(struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int  bce_enable_nvram_write	(struct bce_softc *);
269 static void bce_disable_nvram_write	(struct bce_softc *);
270 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
271 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
272 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
273 #endif
274 
275 /****************************************************************************/
276 /*                                                                          */
277 /****************************************************************************/
278 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
279 static void bce_dma_map_tx_desc		(void *, bus_dma_segment_t *, int, bus_size_t, int);
280 static int  bce_dma_alloc			(device_t);
281 static void bce_dma_free			(struct bce_softc *);
282 static void bce_release_resources	(struct bce_softc *);
283 
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync				(struct bce_softc *, u32);
288 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus			(struct bce_softc *);
291 
292 static void bce_stop				(struct bce_softc *);
293 static int  bce_reset				(struct bce_softc *, u32);
294 static int  bce_chipinit 			(struct bce_softc *);
295 static int  bce_blockinit 			(struct bce_softc *);
296 static int  bce_get_buf				(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297 
298 static int  bce_init_tx_chain		(struct bce_softc *);
299 static int  bce_init_rx_chain		(struct bce_softc *);
300 static void bce_free_rx_chain		(struct bce_softc *);
301 static void bce_free_tx_chain		(struct bce_softc *);
302 
303 static int  bce_tx_encap			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
304 static void bce_start_locked		(struct ifnet *);
305 static void bce_start				(struct ifnet *);
306 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
307 static void bce_watchdog			(struct ifnet *);
308 static int  bce_ifmedia_upd			(struct ifnet *);
309 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
310 static void bce_init_locked			(struct bce_softc *);
311 static void bce_init				(void *);
312 
313 static void bce_init_context		(struct bce_softc *);
314 static void bce_get_mac_addr		(struct bce_softc *);
315 static void bce_set_mac_addr		(struct bce_softc *);
316 static void bce_phy_intr			(struct bce_softc *);
317 static void bce_rx_intr				(struct bce_softc *);
318 static void bce_tx_intr				(struct bce_softc *);
319 static void bce_disable_intr		(struct bce_softc *);
320 static void bce_enable_intr			(struct bce_softc *);
321 
322 #ifdef DEVICE_POLLING
323 static void bce_poll_locked			(struct ifnet *, enum poll_cmd, int);
324 static void bce_poll				(struct ifnet *, enum poll_cmd, int);
325 #endif
326 static void bce_intr				(void *);
327 static void bce_set_rx_mode			(struct bce_softc *);
328 static void bce_stats_update		(struct bce_softc *);
329 static void bce_tick_locked			(struct bce_softc *);
330 static void bce_tick				(void *);
331 static void bce_add_sysctls			(struct bce_softc *);
332 
333 
334 /****************************************************************************/
335 /* FreeBSD device dispatch table.                                           */
336 /****************************************************************************/
/* Newbus method dispatch table: maps generic bus/device/MII entry points
 * onto this driver's implementations. */
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	{ 0, 0 }	/* terminator */
};

/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

/* Module dependencies: PCI bus, Ethernet framework, and the MII bus. */
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

/* Register bce on the PCI bus, and miibus as a child of bce. */
DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
370 
371 
372 /****************************************************************************/
373 /* Device probe function.                                                   */
374 /*                                                                          */
375 /* Compares the device to the driver's list of supported devices and        */
376 /* reports back to the OS whether this is the right driver for the device.  */
377 /*                                                                          */
378 /* Returns:                                                                 */
379 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
380 /****************************************************************************/
381 static int
382 bce_probe(device_t dev)
383 {
384 	struct bce_type *t;
385 	struct bce_softc *sc;
386 	char *descbuf;
387 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
388 
389 	t = bce_devs;
390 
391 	sc = device_get_softc(dev);
392 	bzero(sc, sizeof(struct bce_softc));
393 	sc->bce_unit = device_get_unit(dev);
394 	sc->bce_dev = dev;
395 
396 	/* Get the data for the device to be probed. */
397 	vid  = pci_get_vendor(dev);
398 	did  = pci_get_device(dev);
399 	svid = pci_get_subvendor(dev);
400 	sdid = pci_get_subdevice(dev);
401 
402 	DBPRINT(sc, BCE_VERBOSE_LOAD,
403 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
404 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
405 
406 	/* Look through the list of known devices for a match. */
407 	while(t->bce_name != NULL) {
408 
409 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
410 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
411 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
412 
413 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
414 
415 			if (descbuf == NULL)
416 				return(ENOMEM);
417 
418 			/* Print out the device identity. */
419 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s",
420 				t->bce_name,
421 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
422 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
423 			    bce_driver_version);
424 
425 			device_set_desc_copy(dev, descbuf);
426 			free(descbuf, M_TEMP);
427 			return(BUS_PROBE_DEFAULT);
428 		}
429 		t++;
430 	}
431 
432 	DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No IOCTL match found!\n",
433 		__FILE__, __LINE__);
434 
435 	return(ENXIO);
436 }
437 
438 
439 /****************************************************************************/
440 /* Device attach function.                                                  */
441 /*                                                                          */
442 /* Allocates device resources, performs secondary chip identification,      */
443 /* resets and initializes the hardware, and initializes driver instance     */
444 /* variables.                                                               */
445 /*                                                                          */
446 /* Returns:                                                                 */
447 /*   0 on success, positive value on failure.                               */
448 /****************************************************************************/
449 static int
450 bce_attach(device_t dev)
451 {
452 	struct bce_softc *sc;
453 	struct ifnet *ifp;
454 	u32 val;
455 	int mbuf, rid, rc = 0;
456 
457 	sc = device_get_softc(dev);
458 	sc->bce_dev = dev;
459 
460 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
461 
462 	mbuf = device_get_unit(dev);
463 	sc->bce_unit = mbuf;
464 
465 	pci_enable_busmaster(dev);
466 
467 	/* Allocate PCI memory resources. */
468 	rid = PCIR_BAR(0);
469 	sc->bce_res = bus_alloc_resource_any(
470 		dev, 							/* dev */
471 		SYS_RES_MEMORY, 				/* type */
472 		&rid,							/* rid */
473 	    RF_ACTIVE | PCI_RF_DENSE);		/* flags */
474 
475 	if (sc->bce_res == NULL) {
476 		BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n",
477 			__FILE__, __LINE__);
478 		rc = ENXIO;
479 		goto bce_attach_fail;
480 	}
481 
482 	/* Get various resource handles. */
483 	sc->bce_btag    = rman_get_bustag(sc->bce_res);
484 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
485 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
486 
487 	/* Allocate PCI IRQ resources. */
488 	rid = 0;
489 	sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
490 	    RF_SHAREABLE | RF_ACTIVE);
491 
492 	if (sc->bce_irq == NULL) {
493 		BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n",
494 			__FILE__, __LINE__);
495 		rc = ENXIO;
496 		goto bce_attach_fail;
497 	}
498 
499 	/* Initialize mutex for the current device instance. */
500 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
501 
502 	/*
503 	 * Configure byte swap and enable indirect register access.
504 	 * Rely on CPU to do target byte swapping on big endian systems.
505 	 * Access to registers outside of PCI configurtion space are not
506 	 * valid until this is done.
507 	 */
508 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
509 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
510 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
511 
512 	/* Save ASIC revsion info. */
513 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
514 
515 	/* Weed out any non-production controller revisions. */
516 	switch(BCE_CHIP_ID(sc)) {
517 		case BCE_CHIP_ID_5706_A0:
518 		case BCE_CHIP_ID_5706_A1:
519 		case BCE_CHIP_ID_5708_A0:
520 		case BCE_CHIP_ID_5708_B0:
521 			BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
522 				__FILE__, __LINE__,
523 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
524 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
525 			rc = ENODEV;
526 			goto bce_attach_fail;
527 	}
528 
529 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
530 		BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
531 			__FILE__, __LINE__);
532 		rc = ENODEV;
533 		goto bce_attach_fail;
534 	}
535 
536 	/*
537 	 * The embedded PCIe to PCI-X bridge (EPB)
538 	 * in the 5708 cannot address memory above
539 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
540 	 */
541 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
542 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
543 	else
544 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
545 
546 	/*
547 	 * Find the base address for shared memory access.
548 	 * Newer versions of bootcode use a signature and offset
549 	 * while older versions use a fixed address.
550 	 */
551 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
552 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
553 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
554 	else
555 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
556 
557 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
558 
559 	/* Set initial device and PHY flags */
560 	sc->bce_flags = 0;
561 	sc->bce_phy_flags = 0;
562 
563 	/* Get PCI bus information (speed and type). */
564 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
565 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
566 		u32 clkreg;
567 
568 		sc->bce_flags |= BCE_PCIX_FLAG;
569 
570 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
571 
572 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
573 		switch (clkreg) {
574 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
575 			sc->bus_speed_mhz = 133;
576 			break;
577 
578 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
579 			sc->bus_speed_mhz = 100;
580 			break;
581 
582 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
583 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
584 			sc->bus_speed_mhz = 66;
585 			break;
586 
587 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
588 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
589 			sc->bus_speed_mhz = 50;
590 			break;
591 
592 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
593 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
594 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
595 			sc->bus_speed_mhz = 33;
596 			break;
597 		}
598 	} else {
599 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
600 			sc->bus_speed_mhz = 66;
601 		else
602 			sc->bus_speed_mhz = 33;
603 	}
604 
605 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
606 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
607 
608 	BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
609 		sc->bce_chipid,
610 		((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
611 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
612 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
613 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
614 		sc->bus_speed_mhz);
615 
616 	/* Reset the controller. */
617 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
618 		rc = ENXIO;
619 		goto bce_attach_fail;
620 	}
621 
622 	/* Initialize the controller. */
623 	if (bce_chipinit(sc)) {
624 		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
625 			__FILE__, __LINE__);
626 		rc = ENXIO;
627 		goto bce_attach_fail;
628 	}
629 
630 	/* Perform NVRAM test. */
631 	if (bce_nvram_test(sc)) {
632 		BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
633 			__FILE__, __LINE__);
634 		rc = ENXIO;
635 		goto bce_attach_fail;
636 	}
637 
638 	/* Fetch the permanent Ethernet MAC address. */
639 	bce_get_mac_addr(sc);
640 
641 	/*
642 	 * Trip points control how many BDs
643 	 * should be ready before generating an
644 	 * interrupt while ticks control how long
645 	 * a BD can sit in the chain before
646 	 * generating an interrupt.  Set the default
647 	 * values for the RX and TX rings.
648 	 */
649 
650 #ifdef BCE_DRBUG
651 	/* Force more frequent interrupts. */
652 	sc->bce_tx_quick_cons_trip_int = 1;
653 	sc->bce_tx_quick_cons_trip     = 1;
654 	sc->bce_tx_ticks_int           = 0;
655 	sc->bce_tx_ticks               = 0;
656 
657 	sc->bce_rx_quick_cons_trip_int = 1;
658 	sc->bce_rx_quick_cons_trip     = 1;
659 	sc->bce_rx_ticks_int           = 0;
660 	sc->bce_rx_ticks               = 0;
661 #else
662 	sc->bce_tx_quick_cons_trip_int = 20;
663 	sc->bce_tx_quick_cons_trip     = 20;
664 	sc->bce_tx_ticks_int           = 80;
665 	sc->bce_tx_ticks               = 80;
666 
667 	sc->bce_rx_quick_cons_trip_int = 6;
668 	sc->bce_rx_quick_cons_trip     = 6;
669 	sc->bce_rx_ticks_int           = 18;
670 	sc->bce_rx_ticks               = 18;
671 #endif
672 
673 	/* Update statistics once every second. */
674 	sc->bce_stats_ticks = 1000000 & 0xffff00;
675 
676 	/*
677 	 * The copper based NetXtreme II controllers
678 	 * use an integrated PHY at address 1 while
679 	 * the SerDes controllers use a PHY at
680 	 * address 2.
681 	 */
682 	sc->bce_phy_addr = 1;
683 
684 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
685 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
686 		sc->bce_flags |= BCE_NO_WOL_FLAG;
687 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
688 			sc->bce_phy_addr = 2;
689 			val = REG_RD_IND(sc, sc->bce_shmem_base +
690 					 BCE_SHARED_HW_CFG_CONFIG);
691 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
692 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
693 		}
694 	}
695 
696 	/* Allocate DMA memory resources. */
697 	if (bce_dma_alloc(dev)) {
698 		BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
699 		    __FILE__, __LINE__);
700 		rc = ENXIO;
701 		goto bce_attach_fail;
702 	}
703 
704 	/* Allocate an ifnet structure. */
705 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
706 	if (ifp == NULL) {
707 		BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n",
708 			__FILE__, __LINE__);
709 		rc = ENXIO;
710 		goto bce_attach_fail;
711 	}
712 
713 	/* Initialize the ifnet interface. */
714 	ifp->if_softc        = sc;
715 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
716 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
717 	ifp->if_ioctl        = bce_ioctl;
718 	ifp->if_start        = bce_start;
719 	ifp->if_timer        = 0;
720 	ifp->if_watchdog     = bce_watchdog;
721 	ifp->if_init         = bce_init;
722 	ifp->if_mtu          = ETHERMTU;
723 	ifp->if_hwassist     = BCE_IF_HWASSIST;
724 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
725 	ifp->if_capenable    = ifp->if_capabilities;
726 
727 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
728 	sc->mbuf_alloc_size  = MCLBYTES;
729 #ifdef DEVICE_POLLING
730 	ifp->if_capabilities |= IFCAP_POLLING;
731 #endif
732 
733 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
734 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
735 		ifp->if_baudrate = IF_Gbps(2.5);
736 	else
737 		ifp->if_baudrate = IF_Gbps(1);
738 
739 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
740 	IFQ_SET_READY(&ifp->if_snd);
741 
742 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
743 		BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n",
744 			__FILE__, __LINE__);
745 		rc = ENODEV;
746 		goto bce_attach_fail;
747 	} else {
748 		/* Look for our PHY. */
749 		if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
750 			bce_ifmedia_sts)) {
751 			BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n",
752 				__FILE__, __LINE__);
753 			rc = ENXIO;
754 			goto bce_attach_fail;
755 		}
756 	}
757 
758 	/* Attach to the Ethernet interface list. */
759 	ether_ifattach(ifp, sc->eaddr);
760 
761 #if __FreeBSD_version < 500000
762 	callout_init(&sc->bce_stat_ch);
763 #else
764 	callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
765 #endif
766 
767 	/* Hookup IRQ last. */
768 	rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
769 	   bce_intr, sc, &sc->bce_intrhand);
770 
771 	if (rc) {
772 		BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n",
773 			__FILE__, __LINE__);
774 		bce_detach(dev);
775 		goto bce_attach_exit;
776 	}
777 
778 	/* Print some important debugging info. */
779 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
780 
781 	/* Add the supported sysctls to the kernel. */
782 	bce_add_sysctls(sc);
783 
784 	goto bce_attach_exit;
785 
786 bce_attach_fail:
787 	bce_release_resources(sc);
788 
789 bce_attach_exit:
790 
791 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
792 
793 	return(rc);
794 }
795 
796 
797 /****************************************************************************/
798 /* Device detach function.                                                  */
799 /*                                                                          */
800 /* Stops the controller, resets the controller, and releases resources.     */
801 /*                                                                          */
802 /* Returns:                                                                 */
803 /*   0 on success, positive value on failure.                               */
804 /****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

#ifdef DEVICE_POLLING
	/* Make sure polling is no longer calling into the driver. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Stop and reset the controller. */
	BCE_LOCK(sc);
	bce_stop(sc);
	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	BCE_UNLOCK(sc);

	/* Remove the interface from the network stack. */
	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		/* SerDes media was managed directly; tear down its ifmedia. */
		ifmedia_removeall(&sc->bce_ifmedia);
	} else {
		/* Copper PHY lives on the miibus child; detach and delete it. */
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bce_miibus);
	}

	/* Release all remaining resources (IRQ, memory, DMA, mutex). */
	bce_release_resources(sc);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
845 
846 
847 /****************************************************************************/
848 /* Device shutdown function.                                                */
849 /*                                                                          */
850 /* Stops and resets the controller.                                         */
851 /*                                                                          */
852 /* Returns:                                                                 */
853 /*   Nothing                                                                */
854 /****************************************************************************/
855 static void
856 bce_shutdown(device_t dev)
857 {
858 	struct bce_softc *sc = device_get_softc(dev);
859 
860 	BCE_LOCK(sc);
861 	bce_stop(sc);
862 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
863 	BCE_UNLOCK(sc);
864 }
865 
866 
867 /****************************************************************************/
868 /* Indirect register read.                                                  */
869 /*                                                                          */
870 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
871 /* configuration space.  Using this mechanism avoids issues with posted     */
872 /* reads but is much slower than memory-mapped I/O.                         */
873 /*                                                                          */
874 /* Returns:                                                                 */
875 /*   The value of the register.                                             */
876 /****************************************************************************/
877 static u32
878 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
879 {
880 	device_t dev;
881 	dev = sc->bce_dev;
882 
883 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
884 #ifdef BCE_DEBUG
885 	{
886 		u32 val;
887 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
888 		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
889 			__FUNCTION__, offset, val);
890 		return val;
891 	}
892 #else
893 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
894 #endif
895 }
896 
897 
898 /****************************************************************************/
899 /* Indirect register write.                                                 */
900 /*                                                                          */
901 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
902 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
904 /*                                                                          */
905 /* Returns:                                                                 */
906 /*   Nothing.                                                               */
907 /****************************************************************************/
908 static void
909 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
910 {
911 	device_t dev;
912 	dev = sc->bce_dev;
913 
914 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
915 		__FUNCTION__, offset, val);
916 
917 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
918 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
919 }
920 
921 
922 /****************************************************************************/
923 /* Context memory write.                                                    */
924 /*                                                                          */
925 /* The NetXtreme II controller uses context memory to track connection      */
926 /* information for L2 and higher network protocols.                         */
927 /*                                                                          */
928 /* Returns:                                                                 */
929 /*   Nothing.                                                               */
930 /****************************************************************************/
931 static void
932 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
933 {
934 
935 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
936 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
937 
938 	offset += cid_addr;
939 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
940 	REG_WR(sc, BCE_CTX_DATA, val);
941 }
942 
943 
944 /****************************************************************************/
945 /* PHY register read.                                                       */
946 /*                                                                          */
947 /* Implements register reads on the MII bus.                                */
948 /*                                                                          */
949 /* Returns:                                                                 */
950 /*   The value of the register.                                             */
951 /****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/*
	 * Temporarily turn off hardware MDIO auto-polling so the manual
	 * access below does not collide with it (restored before return).
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back to flush the write, then let the change settle. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and issue the MDIO read command. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the BUSY bit clears or the timeout expires. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			/* Re-read and keep only the 16-bit data field. */
			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* A set BUSY bit here means the loop above timed out. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Return the 16-bit register value (0 on timeout). */
	return (val & 0xffff);

}
1020 
1021 
1022 /****************************************************************************/
1023 /* PHY register write.                                                      */
1024 /*                                                                          */
1025 /* Implements register writes on the MII bus.                               */
1026 /*                                                                          */
1027 /* Returns:                                                                 */
/*   0 for all conditions (a write timeout is only logged).                 */
1029 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/*
	 * Temporarily turn off hardware MDIO auto-polling so the manual
	 * access below does not collide with it (restored before return).
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back to flush the write, then let the change settle. */
		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and issue the MDIO write command. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the BUSY bit clears or the timeout expires. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A set BUSY bit here means the loop above timed out. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Always reports success; a timeout is only logged. */
	return 0;
}
1089 
1090 
1091 /****************************************************************************/
1092 /* MII bus status change.                                                   */
1093 /*                                                                          */
1094 /* Called by the MII bus driver when the PHY establishes link to set the    */
1095 /* MAC interface registers.                                                 */
1096 /*                                                                          */
1097 /* Returns:                                                                 */
1098 /*   Nothing.                                                               */
1099 /****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Clear the current port mode bits before selecting a new one. */
	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
1130 
1131 
1132 /****************************************************************************/
1133 /* Acquire NVRAM lock.                                                      */
1134 /*                                                                          */
1135 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
1138 /*                                                                          */
1139 /* Returns:                                                                 */
1140 /*   0 on success, positive value on failure.                               */
1141 /****************************************************************************/
1142 static int
1143 bce_acquire_nvram_lock(struct bce_softc *sc)
1144 {
1145 	u32 val;
1146 	int j;
1147 
1148 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1149 
1150 	/* Request access to the flash interface. */
1151 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1152 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1153 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1154 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1155 			break;
1156 
1157 		DELAY(5);
1158 	}
1159 
1160 	if (j >= NVRAM_TIMEOUT_COUNT) {
1161 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1162 		return EBUSY;
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 
1169 /****************************************************************************/
1170 /* Release NVRAM lock.                                                      */
1171 /*                                                                          */
1172 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
1175 /*                                                                          */
1176 /* Returns:                                                                 */
1177 /*   0 on success, positive value on failure.                               */
1178 /****************************************************************************/
1179 static int
1180 bce_release_nvram_lock(struct bce_softc *sc)
1181 {
1182 	int j;
1183 	u32 val;
1184 
1185 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1186 
1187 	/*
1188 	 * Relinquish nvram interface.
1189 	 */
1190 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1191 
1192 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1193 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1194 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1195 			break;
1196 
1197 		DELAY(5);
1198 	}
1199 
1200 	if (j >= NVRAM_TIMEOUT_COUNT) {
1201 		DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1202 		return EBUSY;
1203 	}
1204 
1205 	return 0;
1206 }
1207 
1208 
1209 #ifdef BCE_NVRAM_WRITE_SUPPORT
1210 /****************************************************************************/
1211 /* Enable NVRAM write access.                                               */
1212 /*                                                                          */
1213 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1214 /*                                                                          */
1215 /* Returns:                                                                 */
1216 /*   0 on success, positive value on failure.                               */
1217 /****************************************************************************/
1218 static int
1219 bce_enable_nvram_write(struct bce_softc *sc)
1220 {
1221 	u32 val;
1222 
1223 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1224 
1225 	val = REG_RD(sc, BCE_MISC_CFG);
1226 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1227 
1228 	if (!sc->bce_flash_info->buffered) {
1229 		int j;
1230 
1231 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1232 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1233 
1234 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1235 			DELAY(5);
1236 
1237 			val = REG_RD(sc, BCE_NVM_COMMAND);
1238 			if (val & BCE_NVM_COMMAND_DONE)
1239 				break;
1240 		}
1241 
1242 		if (j >= NVRAM_TIMEOUT_COUNT) {
1243 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1244 			return EBUSY;
1245 		}
1246 	}
1247 	return 0;
1248 }
1249 
1250 
1251 /****************************************************************************/
1252 /* Disable NVRAM write access.                                              */
1253 /*                                                                          */
1254 /* When the caller is finished writing to NVRAM write access must be        */
1255 /* disabled.                                                                */
1256 /*                                                                          */
1257 /* Returns:                                                                 */
1258 /*   Nothing.                                                               */
1259 /****************************************************************************/
1260 static void
1261 bce_disable_nvram_write(struct bce_softc *sc)
1262 {
1263 	u32 val;
1264 
1265 	DBPRINT(sc, BCE_VERBOSE,  "Disabling NVRAM write.\n");
1266 
1267 	val = REG_RD(sc, BCE_MISC_CFG);
1268 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1269 }
1270 #endif
1271 
1272 
1273 /****************************************************************************/
1274 /* Enable NVRAM access.                                                     */
1275 /*                                                                          */
1276 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1278 /*                                                                          */
1279 /* Returns:                                                                 */
1280 /*   Nothing.                                                               */
1281 /****************************************************************************/
1282 static void
1283 bce_enable_nvram_access(struct bce_softc *sc)
1284 {
1285 	u32 val;
1286 
1287 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1288 
1289 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1290 	/* Enable both bits, even on read. */
1291 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1292 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1293 }
1294 
1295 
1296 /****************************************************************************/
1297 /* Disable NVRAM access.                                                    */
1298 /*                                                                          */
1299 /* When the caller is finished accessing NVRAM access must be disabled.     */
1300 /*                                                                          */
1301 /* Returns:                                                                 */
1302 /*   Nothing.                                                               */
1303 /****************************************************************************/
1304 static void
1305 bce_disable_nvram_access(struct bce_softc *sc)
1306 {
1307 	u32 val;
1308 
1309 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1310 
1311 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1312 
1313 	/* Disable both bits, even after read. */
1314 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1315 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1316 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1317 }
1318 
1319 
1320 #ifdef BCE_NVRAM_WRITE_SUPPORT
1321 /****************************************************************************/
1322 /* Erase NVRAM page before writing.                                         */
1323 /*                                                                          */
1324 /* Non-buffered flash parts require that a page be erased before it is      */
1325 /* written.                                                                 */
1326 /*                                                                          */
1327 /* Returns:                                                                 */
1328 /*   0 on success, positive value on failure.                               */
1329 /****************************************************************************/
1330 static int
1331 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1332 {
1333 	u32 cmd;
1334 	int j;
1335 
1336 	/* Buffered flash doesn't require an erase. */
1337 	if (sc->bce_flash_info->buffered)
1338 		return 0;
1339 
1340 	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1341 
1342 	/* Build an erase command. */
1343 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1344 	      BCE_NVM_COMMAND_DOIT;
1345 
1346 	/*
1347 	 * Clear the DONE bit separately, set the NVRAM adress to erase,
1348 	 * and issue the erase command.
1349 	 */
1350 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1351 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1352 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1353 
1354 	/* Wait for completion. */
1355 	 */
1356 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1357 		u32 val;
1358 
1359 		DELAY(5);
1360 
1361 		val = REG_RD(sc, BCE_NVM_COMMAND);
1362 		if (val & BCE_NVM_COMMAND_DONE)
1363 			break;
1364 	}
1365 
1366 	if (j >= NVRAM_TIMEOUT_COUNT) {
1367 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1368 		return EBUSY;
1369 	}
1370 
1371 	return 0;
1372 }
1373 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1374 
1375 
1376 /****************************************************************************/
1377 /* Read a dword (32 bits) from NVRAM.                                       */
1378 /*                                                                          */
1379 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1380 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1381 /*                                                                          */
1382 /* Returns:                                                                 */
1383 /*   0 on success and the 32 bit value read, positive value on failure.     */
1384 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
							u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		/* Translate the linear offset into a page/offset address. */
		offset = ((offset / sc->bce_flash_info->page_size) <<
			   sc->bce_flash_info->page_bits) +
			  (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			/* Fetch the data and copy it out in host byte order. */
			val = REG_RD(sc, BCE_NVM_READ);

			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1435 
1436 
1437 #ifdef BCE_NVRAM_WRITE_SUPPORT
1438 /****************************************************************************/
1439 /* Write a dword (32 bits) to NVRAM.                                        */
1440 /*                                                                          */
1441 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1442 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1443 /* enabled NVRAM write access.                                              */
1444 /*                                                                          */
1445 /* Returns:                                                                 */
1446 /*   0 on success, positive value on failure.                               */
1447 /****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
	u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		/* Translate the linear offset into a page/offset address. */
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	/* Report a timeout if the DONE bit never came up. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
1491 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1492 
1493 
1494 /****************************************************************************/
1495 /* Initialize NVRAM access.                                                 */
1496 /*                                                                          */
1497 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1498 /* access that device.                                                      */
1499 /*                                                                          */
1500 /* Returns:                                                                 */
1501 /*   0 on success, positive value on failure.                               */
1502 /****************************************************************************/
1503 static int
1504 bce_init_nvram(struct bce_softc *sc)
1505 {
1506 	u32 val;
1507 	int j, entry_count, rc;
1508 	struct flash_spec *flash;
1509 
1510 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1511 
1512 	/* Determine the selected interface. */
1513 	val = REG_RD(sc, BCE_NVM_CFG1);
1514 
1515 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1516 
1517 	rc = 0;
1518 
1519 	/*
1520 	 * Flash reconfiguration is required to support additional
1521 	 * NVRAM devices not directly supported in hardware.
1522 	 * Check if the flash interface was reconfigured
1523 	 * by the bootcode.
1524 	 */
1525 
1526 	if (val & 0x40000000) {
1527 		/* Flash interface reconfigured by bootcode. */
1528 
1529 		DBPRINT(sc,BCE_INFO_LOAD,
1530 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1531 
1532 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1533 		     j++, flash++) {
1534 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1535 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1536 				sc->bce_flash_info = flash;
1537 				break;
1538 			}
1539 		}
1540 	} else {
1541 		/* Flash interface not yet reconfigured. */
1542 		u32 mask;
1543 
1544 		DBPRINT(sc,BCE_INFO_LOAD,
1545 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1546 
1547 		if (val & (1 << 23))
1548 			mask = FLASH_BACKUP_STRAP_MASK;
1549 		else
1550 			mask = FLASH_STRAP_MASK;
1551 
1552 		/* Look for the matching NVRAM device configuration data. */
1553 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1554 
1555 			/* Check if the device matches any of the known devices. */
1556 			if ((val & mask) == (flash->strapping & mask)) {
1557 				/* Found a device match. */
1558 				sc->bce_flash_info = flash;
1559 
1560 				/* Request access to the flash interface. */
1561 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1562 					return rc;
1563 
1564 				/* Reconfigure the flash interface. */
1565 				bce_enable_nvram_access(sc);
1566 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1567 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1568 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1569 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1570 				bce_disable_nvram_access(sc);
1571 				bce_release_nvram_lock(sc);
1572 
1573 				break;
1574 			}
1575 		}
1576 	}
1577 
1578 	/* Check if a matching device was found. */
1579 	if (j == entry_count) {
1580 		sc->bce_flash_info = NULL;
1581 		BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1582 			__FILE__, __LINE__);
1583 		rc = ENODEV;
1584 	}
1585 
1586 	/* Write the flash config data to the shared memory interface. */
1587 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1588 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1589 	if (val)
1590 		sc->bce_flash_size = val;
1591 	else
1592 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1593 
1594 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1595 		sc->bce_flash_info->total_size);
1596 
1597 	DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1598 
1599 	return rc;
1600 }
1601 
1602 
1603 /****************************************************************************/
1604 /* Read an arbitrary range of data from NVRAM.                              */
1605 /*                                                                          */
1606 /* Prepares the NVRAM interface for access and reads the requested data     */
1607 /* into the supplied buffer.                                                */
1608 /*                                                                          */
1609 /* Returns:                                                                 */
1610 /*   0 on success and the data read, positive value on failure.             */
1611 /****************************************************************************/
1612 static int
1613 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1614 	int buf_size)
1615 {
1616 	int rc = 0;
1617 	u32 cmd_flags, offset32, len32, extra;
1618 
1619 	if (buf_size == 0)
1620 		return 0;
1621 
1622 	/* Request access to the flash interface. */
1623 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1624 		return rc;
1625 
1626 	/* Enable access to flash interface */
1627 	bce_enable_nvram_access(sc);
1628 
1629 	len32 = buf_size;
1630 	offset32 = offset;
1631 	extra = 0;
1632 
1633 	cmd_flags = 0;
1634 
1635 	if (offset32 & 3) {
1636 		u8 buf[4];
1637 		u32 pre_len;
1638 
1639 		offset32 &= ~3;
1640 		pre_len = 4 - (offset & 3);
1641 
1642 		if (pre_len >= len32) {
1643 			pre_len = len32;
1644 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1645 		}
1646 		else {
1647 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1648 		}
1649 
1650 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1651 
1652 		if (rc)
1653 			return rc;
1654 
1655 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1656 
1657 		offset32 += 4;
1658 		ret_buf += pre_len;
1659 		len32 -= pre_len;
1660 	}
1661 
1662 	if (len32 & 3) {
1663 		extra = 4 - (len32 & 3);
1664 		len32 = (len32 + 4) & ~3;
1665 	}
1666 
1667 	if (len32 == 4) {
1668 		u8 buf[4];
1669 
1670 		if (cmd_flags)
1671 			cmd_flags = BCE_NVM_COMMAND_LAST;
1672 		else
1673 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1674 				    BCE_NVM_COMMAND_LAST;
1675 
1676 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1677 
1678 		memcpy(ret_buf, buf, 4 - extra);
1679 	}
1680 	else if (len32 > 0) {
1681 		u8 buf[4];
1682 
1683 		/* Read the first word. */
1684 		if (cmd_flags)
1685 			cmd_flags = 0;
1686 		else
1687 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1688 
1689 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1690 
1691 		/* Advance to the next dword. */
1692 		offset32 += 4;
1693 		ret_buf += 4;
1694 		len32 -= 4;
1695 
1696 		while (len32 > 4 && rc == 0) {
1697 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1698 
1699 			/* Advance to the next dword. */
1700 			offset32 += 4;
1701 			ret_buf += 4;
1702 			len32 -= 4;
1703 		}
1704 
1705 		if (rc)
1706 			return rc;
1707 
1708 		cmd_flags = BCE_NVM_COMMAND_LAST;
1709 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1710 
1711 		memcpy(ret_buf, buf, 4 - extra);
1712 	}
1713 
1714 	/* Disable access to flash interface and release the lock. */
1715 	bce_disable_nvram_access(sc);
1716 	bce_release_nvram_lock(sc);
1717 
1718 	return rc;
1719 }
1720 
1721 
1722 #ifdef BCE_NVRAM_WRITE_SUPPORT
1723 /****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
1725 /*                                                                          */
1726 /* Prepares the NVRAM interface for write access and writes the requested   */
1727 /* data from the supplied buffer.  The caller is responsible for            */
1728 /* calculating any appropriate CRCs.                                        */
1729 /*                                                                          */
1730 /* Returns:                                                                 */
1731 /*   0 on success, positive value on failure.                               */
1732 /****************************************************************************/
1733 static int
1734 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1735 	int buf_size)
1736 {
1737 	u32 written, offset32, len32;
1738 	u8 *buf, start[4], end[4];
1739 	int rc = 0;
1740 	int align_start, align_end;
1741 
1742 	buf = data_buf;
1743 	offset32 = offset;
1744 	len32 = buf_size;
1745 	align_start = align_end = 0;
1746 
1747 	if ((align_start = (offset32 & 3))) {
1748 		offset32 &= ~3;
1749 		len32 += align_start;
1750 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1751 			return rc;
1752 	}
1753 
1754 	if (len32 & 3) {
1755 	       	if ((len32 > 4) || !align_start) {
1756 			align_end = 4 - (len32 & 3);
1757 			len32 += align_end;
1758 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1759 				end, 4))) {
1760 				return rc;
1761 			}
1762 		}
1763 	}
1764 
1765 	if (align_start || align_end) {
1766 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1767 		if (buf == 0)
1768 			return ENOMEM;
1769 		if (align_start) {
1770 			memcpy(buf, start, 4);
1771 		}
1772 		if (align_end) {
1773 			memcpy(buf + len32 - 4, end, 4);
1774 		}
1775 		memcpy(buf + align_start, data_buf, buf_size);
1776 	}
1777 
1778 	written = 0;
1779 	while ((written < len32) && (rc == 0)) {
1780 		u32 page_start, page_end, data_start, data_end;
1781 		u32 addr, cmd_flags;
1782 		int i;
1783 		u8 flash_buffer[264];
1784 
1785 	    /* Find the page_start addr */
1786 		page_start = offset32 + written;
1787 		page_start -= (page_start % sc->bce_flash_info->page_size);
1788 		/* Find the page_end addr */
1789 		page_end = page_start + sc->bce_flash_info->page_size;
1790 		/* Find the data_start addr */
1791 		data_start = (written == 0) ? offset32 : page_start;
1792 		/* Find the data_end addr */
1793 		data_end = (page_end > offset32 + len32) ?
1794 			(offset32 + len32) : page_end;
1795 
1796 		/* Request access to the flash interface. */
1797 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1798 			goto nvram_write_end;
1799 
1800 		/* Enable access to flash interface */
1801 		bce_enable_nvram_access(sc);
1802 
1803 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1804 		if (sc->bce_flash_info->buffered == 0) {
1805 			int j;
1806 
1807 			/* Read the whole page into the buffer
1808 			 * (non-buffer flash only) */
1809 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1810 				if (j == (sc->bce_flash_info->page_size - 4)) {
1811 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1812 				}
1813 				rc = bce_nvram_read_dword(sc,
1814 					page_start + j,
1815 					&flash_buffer[j],
1816 					cmd_flags);
1817 
1818 				if (rc)
1819 					goto nvram_write_end;
1820 
1821 				cmd_flags = 0;
1822 			}
1823 		}
1824 
1825 		/* Enable writes to flash interface (unlock write-protect) */
1826 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1827 			goto nvram_write_end;
1828 
1829 		/* Erase the page */
1830 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1831 			goto nvram_write_end;
1832 
1833 		/* Re-enable the write again for the actual write */
1834 		bce_enable_nvram_write(sc);
1835 
1836 		/* Loop to write back the buffer data from page_start to
1837 		 * data_start */
1838 		i = 0;
1839 		if (sc->bce_flash_info->buffered == 0) {
1840 			for (addr = page_start; addr < data_start;
1841 				addr += 4, i += 4) {
1842 
1843 				rc = bce_nvram_write_dword(sc, addr,
1844 					&flash_buffer[i], cmd_flags);
1845 
1846 				if (rc != 0)
1847 					goto nvram_write_end;
1848 
1849 				cmd_flags = 0;
1850 			}
1851 		}
1852 
1853 		/* Loop to write the new data from data_start to data_end */
1854 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1855 			if ((addr == page_end - 4) ||
1856 				((sc->bce_flash_info->buffered) &&
1857 				 (addr == data_end - 4))) {
1858 
1859 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1860 			}
1861 			rc = bce_nvram_write_dword(sc, addr, buf,
1862 				cmd_flags);
1863 
1864 			if (rc != 0)
1865 				goto nvram_write_end;
1866 
1867 			cmd_flags = 0;
1868 			buf += 4;
1869 		}
1870 
1871 		/* Loop to write back the buffer data from data_end
1872 		 * to page_end */
1873 		if (sc->bce_flash_info->buffered == 0) {
1874 			for (addr = data_end; addr < page_end;
1875 				addr += 4, i += 4) {
1876 
1877 				if (addr == page_end-4) {
1878 					cmd_flags = BCE_NVM_COMMAND_LAST;
1879                 		}
1880 				rc = bce_nvram_write_dword(sc, addr,
1881 					&flash_buffer[i], cmd_flags);
1882 
1883 				if (rc != 0)
1884 					goto nvram_write_end;
1885 
1886 				cmd_flags = 0;
1887 			}
1888 		}
1889 
1890 		/* Disable writes to flash interface (lock write-protect) */
1891 		bce_disable_nvram_write(sc);
1892 
1893 		/* Disable access to flash interface */
1894 		bce_disable_nvram_access(sc);
1895 		bce_release_nvram_lock(sc);
1896 
1897 		/* Increment written */
1898 		written += data_end - data_start;
1899 	}
1900 
1901 nvram_write_end:
1902 	if (align_start || align_end)
1903 		free(buf, M_DEVBUF);
1904 
1905 	return rc;
1906 }
1907 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1908 
1909 
1910 /****************************************************************************/
1911 /* Verifies that NVRAM is accessible and contains valid data.               */
1912 /*                                                                          */
1913 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1914 /* correct.                                                                 */
1915 /*                                                                          */
1916 /* Returns:                                                                 */
1917 /*   0 on success, positive value on failure.                               */
1918 /****************************************************************************/
1919 static int
1920 bce_nvram_test(struct bce_softc *sc)
1921 {
1922 	u32 buf[BCE_NVRAM_SIZE / 4];
1923 	u8 *data = (u8 *) buf;
1924 	int rc = 0;
1925 	u32 magic, csum;
1926 
1927 
1928 	/*
1929 	 * Check that the device NVRAM is valid by reading
1930 	 * the magic value at offset 0.
1931 	 */
1932 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1933 		goto bce_nvram_test_done;
1934 
1935 
1936     magic = bce_be32toh(buf[0]);
1937 	if (magic != BCE_NVRAM_MAGIC) {
1938 		rc = ENODEV;
1939 		BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1940 			"Found: 0x%08X\n",
1941 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1942 		goto bce_nvram_test_done;
1943 	}
1944 
1945 	/*
1946 	 * Verify that the device NVRAM includes valid
1947 	 * configuration data.
1948 	 */
1949 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1950 		goto bce_nvram_test_done;
1951 
1952 	csum = ether_crc32_le(data, 0x100);
1953 	if (csum != BCE_CRC32_RESIDUAL) {
1954 		rc = ENODEV;
1955 		BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1956 			"Expected: 0x%08X, Found: 0x%08X\n",
1957 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1958 		goto bce_nvram_test_done;
1959 	}
1960 
1961 	csum = ether_crc32_le(data + 0x100, 0x100);
1962 	if (csum != BCE_CRC32_RESIDUAL) {
1963 		BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1964 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1965 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1966 		rc = ENODEV;
1967 	}
1968 
1969 bce_nvram_test_done:
1970 	return rc;
1971 }
1972 
1973 
1974 /****************************************************************************/
1975 /* Free any DMA memory owned by the driver.                                 */
1976 /*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
1978 /* the memory if allocated.                                                 */
1979 /*                                                                          */
1980 /* Returns:                                                                 */
1981 /*   Nothing.                                                               */
1982 /****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Each resource below is released only if it was actually
	 * allocated, so this routine is safe to call from a partially
	 * failed bce_dma_alloc().
	 *
	 * NOTE(review): the DMA memory is freed *before* its map is
	 * unloaded; the conventional teardown order per bus_dma(9) is
	 * unload -> free -> tag destroy.  Also, maps obtained from
	 * bus_dmamem_alloc() are normally released by bus_dmamem_free()
	 * rather than an explicit bus_dmamap_destroy() -- confirm both
	 * points against bus_dma(9) for this FreeBSD version.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL)
		bus_dmamem_free(
			sc->status_tag,
		    sc->status_block,
		    sc->status_map);

	if (sc->status_map != NULL) {
		bus_dmamap_unload(
			sc->status_tag,
		    sc->status_map);
		bus_dmamap_destroy(sc->status_tag,
		    sc->status_map);
	}

	if (sc->status_tag != NULL)
		bus_dma_tag_destroy(sc->status_tag);


	/* Destroy the statistics block. */
	if (sc->stats_block != NULL)
		bus_dmamem_free(
			sc->stats_tag,
		    sc->stats_block,
		    sc->stats_map);

	if (sc->stats_map != NULL) {
		bus_dmamap_unload(
			sc->stats_tag,
		    sc->stats_map);
		bus_dmamap_destroy(sc->stats_tag,
		    sc->stats_map);
	}

	if (sc->stats_tag != NULL)
		bus_dma_tag_destroy(sc->stats_tag);


	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL)
			bus_dmamem_free(
				sc->tx_bd_chain_tag,
			    sc->tx_bd_chain[i],
			    sc->tx_bd_chain_map[i]);

		if (sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
				sc->tx_bd_chain_tag,
		    	sc->tx_bd_chain_map[i]);
			bus_dmamap_destroy(
				sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
		}

	}

	/* Destroy the TX buffer descriptor tag. */
	if (sc->tx_bd_chain_tag != NULL)
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);


	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL)
			bus_dmamem_free(
				sc->rx_bd_chain_tag,
			    sc->rx_bd_chain[i],
			    sc->rx_bd_chain_map[i]);

		if (sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
				sc->rx_bd_chain_tag,
		    	sc->rx_bd_chain_map[i]);
			bus_dmamap_destroy(
				sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
		}
	}

	/* Destroy the RX buffer descriptor tag. */
	if (sc->rx_bd_chain_tag != NULL)
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);


	/* Unload and destroy the TX mbuf maps. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag,
				sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
	 			sc->tx_mbuf_map[i]);
		}
	}

	/* Destroy the TX mbuf tag. */
	if (sc->tx_mbuf_tag != NULL)
		bus_dma_tag_destroy(sc->tx_mbuf_tag);


	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag,
				sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
	 			sc->rx_mbuf_map[i]);
		}
	}

	/* Destroy the RX mbuf tag. */
	if (sc->rx_mbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rx_mbuf_tag);


	/* Destroy the parent tag (must be last: children reference it). */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

}
2112 
2113 
2114 /****************************************************************************/
2115 /* Get DMA memory from the OS.                                              */
2116 /*                                                                          */
2117 /* Validates that the OS has provided DMA buffers in response to a          */
2118 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2119 /* When the callback is used the OS will return 0 for the mapping function  */
2120 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2121 /* failures back to the caller.                                             */
2122 /*                                                                          */
2123 /* Returns:                                                                 */
2124 /*   Nothing.                                                               */
2125 /****************************************************************************/
2126 static void
2127 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2128 {
2129 	struct bce_dmamap_arg *map_arg = arg;
2130 	struct bce_softc *sc = map_arg->sc;
2131 
2132 	/* Simulate a mapping failure. */
2133 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2134 		BCE_PRINTF(sc, "%s(%d): Simulating DMA mapping error.\n",
2135 			__FILE__, __LINE__);
2136 		error = ENOMEM);
2137 
2138 	/* Check for an error and signal the caller that an error occurred. */
2139 	if (error || (nseg > map_arg->maxsegs)) {
2140 		BCE_PRINTF(sc, "%s(%d): DMA mapping error! error = %d, "
2141 		"nseg = %d, maxsegs = %d\n",
2142 			__FILE__, __LINE__, error, nseg, map_arg->maxsegs);
2143 		map_arg->maxsegs = 0;
2144 		goto bce_dma_map_addr_exit;
2145 	}
2146 
2147 	map_arg->busaddr = segs->ds_addr;
2148 
2149 bce_dma_map_addr_exit:
2150 	return;
2151 }
2152 
2153 
2154 /****************************************************************************/
2155 /* Map TX buffers into TX buffer descriptors.                               */
2156 /*                                                                          */
/* Given a series of DMA memory containing an outgoing frame, map the       */
2158 /* segments into the tx_bd structure used by the hardware.                  */
2159 /*                                                                          */
2160 /* Returns:                                                                 */
2161 /*   Nothing.                                                               */
2162 /****************************************************************************/
static void
bce_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs,
	int nseg, bus_size_t mapsize, int error)
{
	struct bce_dmamap_arg *map_arg;
	struct bce_softc *sc;
	struct tx_bd *txbd = NULL;
	int i = 0;
	u16 prod, chain_prod;
	u32	prod_bseq;
#ifdef BCE_DEBUG
	u16 debug_prod;
#endif

	map_arg = arg;
	sc = map_arg->sc;

	/* Abort without touching the chain if the map load failed. */
	if (error) {
		DBPRINT(sc, BCE_WARN, "%s(): Called with error = %d\n",
			__FUNCTION__, error);
		return;
	}

	/* Signal error to caller if there's too many segments
	 * (maxsegs == 0 is the failure indication read by the caller). */
	if (nseg > map_arg->maxsegs) {
		DBPRINT(sc, BCE_WARN,
			"%s(): Mapped TX descriptors: max segs = %d, "
			"actual segs = %d\n",
			__FUNCTION__, map_arg->maxsegs, nseg);

		map_arg->maxsegs = 0;
		return;
	}

	/* prod points to an empty tx_bd at this point. */
	prod       = map_arg->prod;
	chain_prod = map_arg->chain_prod;
	prod_bseq  = map_arg->prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */

	txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

	/* Setup the first tx_bd for the first segment: it additionally
	 * carries the START flag and the frame-wide VLAN/flag bits. */
	txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
	txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
	txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
	txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
			TX_BD_FLAGS_START);
	/* prod_bseq accumulates the byte count across all segments. */
	prod_bseq += segs[i].ds_len;

	/* Setup any remaining segments. */
	for (i = 1; i < nseg; i++) {
		/* NEXT_TX_BD skips the chain-page link entries. */
		prod       = NEXT_TX_BD(prod);
		chain_prod = TX_CHAIN_IDX(prod);

		txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);

		prod_bseq += segs[i].ds_len;
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nseg));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/* prod points to the last tx_bd at this point.  Return the
	 * updated producer state to the caller through map_arg. */
	map_arg->maxsegs    = nseg;
	map_arg->prod       = prod;
	map_arg->chain_prod = chain_prod;
	map_arg->prod_bseq  = prod_bseq;
}
2259 
2260 
2261 /****************************************************************************/
2262 /* Allocate any DMA memory needed by the driver.                            */
2263 /*                                                                          */
2264 /* Allocates DMA memory needed for the various global structures needed by  */
2265 /* hardware.                                                                */
2266 /*                                                                          */
2267 /* Returns:                                                                 */
2268 /*   0 for success, positive value for failure.                             */
2269 /****************************************************************************/
2270 static int
2271 bce_dma_alloc(device_t dev)
2272 {
2273 	struct bce_softc *sc;
2274 	int i, error, rc = 0;
2275 	struct bce_dmamap_arg map_arg;
2276 
2277 	sc = device_get_softc(dev);
2278 
2279 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2280 
2281 	/*
2282 	 * Allocate the parent bus DMA tag appropriate for PCI.
2283 	 */
2284 	if (bus_dma_tag_create(NULL,		/* parent     */
2285 			BCE_DMA_ALIGN,				/* alignment  */
2286 			BCE_DMA_BOUNDARY,			/* boundary   */
2287 			sc->max_bus_addr,			/* lowaddr    */
2288 			BUS_SPACE_MAXADDR,			/* highaddr   */
2289 			NULL, 						/* filterfunc */
2290 			NULL,						/* filterarg  */
2291 			MAXBSIZE, 					/* maxsize    */
2292 			BUS_SPACE_UNRESTRICTED,		/* nsegments  */
2293 			BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2294 			0,							/* flags      */
2295 			NULL, 						/* locfunc    */
2296 			NULL,						/* lockarg    */
2297 			&sc->parent_tag)) {
2298 		BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2299 			__FILE__, __LINE__);
2300 		rc = ENOMEM;
2301 		goto bce_dma_alloc_exit;
2302 	}
2303 
2304 	/*
2305 	 * Create a DMA tag for the status block, allocate and clear the
2306 	 * memory, map the memory into DMA space, and fetch the physical
2307 	 * address of the block.
2308 	 */
2309 	if (bus_dma_tag_create(
2310 			sc->parent_tag,			/* parent      */
2311 	    	BCE_DMA_ALIGN,			/* alignment   */
2312 	    	BCE_DMA_BOUNDARY,		/* boundary    */
2313 	    	sc->max_bus_addr,		/* lowaddr     */
2314 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2315 	    	NULL, 					/* filterfunc  */
2316 	    	NULL, 					/* filterarg   */
2317 	    	BCE_STATUS_BLK_SZ, 		/* maxsize     */
2318 	    	1,						/* nsegments   */
2319 	    	BCE_STATUS_BLK_SZ, 		/* maxsegsize  */
2320 	    	0,						/* flags       */
2321 	    	NULL, 					/* lockfunc    */
2322 	    	NULL,					/* lockarg     */
2323 	    	&sc->status_tag)) {
2324 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2325 			__FILE__, __LINE__);
2326 		rc = ENOMEM;
2327 		goto bce_dma_alloc_exit;
2328 	}
2329 
2330 	if(bus_dmamem_alloc(
2331 			sc->status_tag,				/* dmat        */
2332 	    	(void **)&sc->status_block,	/* vaddr       */
2333 	    	BUS_DMA_NOWAIT,					/* flags       */
2334 	    	&sc->status_map)) {
2335 		BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2336 			__FILE__, __LINE__);
2337 		rc = ENOMEM;
2338 		goto bce_dma_alloc_exit;
2339 	}
2340 
2341 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2342 
2343 	map_arg.sc = sc;
2344 	map_arg.maxsegs = 1;
2345 
2346 	error = bus_dmamap_load(
2347 			sc->status_tag,	   		/* dmat        */
2348 	    	sc->status_map,	   		/* map         */
2349 	    	sc->status_block,	 	/* buf         */
2350 	    	BCE_STATUS_BLK_SZ,	 	/* buflen      */
2351 	    	bce_dma_map_addr, 	 	/* callback    */
2352 	    	&map_arg,			 	/* callbackarg */
2353 	    	BUS_DMA_NOWAIT);		/* flags       */
2354 
2355 	if(error || (map_arg.maxsegs == 0)) {
2356 		BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2357 			__FILE__, __LINE__);
2358 		rc = ENOMEM;
2359 		goto bce_dma_alloc_exit;
2360 	}
2361 
2362 	sc->status_block_paddr = map_arg.busaddr;
2363 	/* DRC - Fix for 64 bit addresses. */
2364 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2365 		(u32) sc->status_block_paddr);
2366 
2367 	/*
2368 	 * Create a DMA tag for the statistics block, allocate and clear the
2369 	 * memory, map the memory into DMA space, and fetch the physical
2370 	 * address of the block.
2371 	 */
2372 	if (bus_dma_tag_create(
2373 			sc->parent_tag,			/* parent      */
2374 	    	BCE_DMA_ALIGN,	 		/* alignment   */
2375 	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2376 	    	sc->max_bus_addr,		/* lowaddr     */
2377 	    	BUS_SPACE_MAXADDR,		/* highaddr    */
2378 	    	NULL,		 	  		/* filterfunc  */
2379 	    	NULL, 			  		/* filterarg   */
2380 	    	BCE_STATS_BLK_SZ, 		/* maxsize     */
2381 	    	1,				  		/* nsegments   */
2382 	    	BCE_STATS_BLK_SZ, 		/* maxsegsize  */
2383 	    	0, 				  		/* flags       */
2384 	    	NULL, 			  		/* lockfunc    */
2385 	    	NULL, 			  		/* lockarg     */
2386 	    	&sc->stats_tag)) {
2387 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2388 			__FILE__, __LINE__);
2389 		rc = ENOMEM;
2390 		goto bce_dma_alloc_exit;
2391 	}
2392 
2393 	if (bus_dmamem_alloc(
2394 			sc->stats_tag,				/* dmat        */
2395 	    	(void **)&sc->stats_block,	/* vaddr       */
2396 	    	BUS_DMA_NOWAIT,	 			/* flags       */
2397 	    	&sc->stats_map)) {
2398 		BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2399 			__FILE__, __LINE__);
2400 		rc = ENOMEM;
2401 		goto bce_dma_alloc_exit;
2402 	}
2403 
2404 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2405 
2406 	map_arg.sc = sc;
2407 	map_arg.maxsegs = 1;
2408 
2409 	error = bus_dmamap_load(
2410 			sc->stats_tag,	 	/* dmat        */
2411 	    	sc->stats_map,	 	/* map         */
2412 	    	sc->stats_block, 	/* buf         */
2413 	    	BCE_STATS_BLK_SZ,	/* buflen      */
2414 	    	bce_dma_map_addr,	/* callback    */
2415 	    	&map_arg, 		 	/* callbackarg */
2416 	    	BUS_DMA_NOWAIT);	/* flags       */
2417 
2418 	if(error || (map_arg.maxsegs == 0)) {
2419 		BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2420 			__FILE__, __LINE__);
2421 		rc = ENOMEM;
2422 		goto bce_dma_alloc_exit;
2423 	}
2424 
2425 	sc->stats_block_paddr = map_arg.busaddr;
2426 	/* DRC - Fix for 64 bit address. */
2427 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2428 		(u32) sc->stats_block_paddr);
2429 
2430 	/*
2431 	 * Create a DMA tag for the TX buffer descriptor chain,
2432 	 * allocate and clear the  memory, and fetch the
2433 	 * physical address of the block.
2434 	 */
2435 	if(bus_dma_tag_create(
2436 			sc->parent_tag,		  /* parent      */
2437 	    	BCM_PAGE_SIZE,		  /* alignment   */
2438 	    	BCE_DMA_BOUNDARY,	  /* boundary    */
2439 			sc->max_bus_addr,	  /* lowaddr     */
2440 			BUS_SPACE_MAXADDR, 	  /* highaddr    */
2441 			NULL, 				  /* filterfunc  */
2442 			NULL, 				  /* filterarg   */
2443 			BCE_TX_CHAIN_PAGE_SZ, /* maxsize     */
2444 			1,			  		  /* nsegments   */
2445 			BCE_TX_CHAIN_PAGE_SZ, /* maxsegsize  */
2446 			0,				 	  /* flags       */
2447 			NULL, 				  /* lockfunc    */
2448 			NULL,				  /* lockarg     */
2449 			&sc->tx_bd_chain_tag)) {
2450 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2451 			__FILE__, __LINE__);
2452 		rc = ENOMEM;
2453 		goto bce_dma_alloc_exit;
2454 	}
2455 
2456 	for (i = 0; i < TX_PAGES; i++) {
2457 
2458 		if(bus_dmamem_alloc(
2459 				sc->tx_bd_chain_tag,			/* tag   */
2460 	    		(void **)&sc->tx_bd_chain[i],	/* vaddr */
2461 	    		BUS_DMA_NOWAIT,					/* flags */
2462 		    	&sc->tx_bd_chain_map[i])) {
2463 			BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2464 				"chain DMA memory!\n", __FILE__, __LINE__);
2465 			rc = ENOMEM;
2466 			goto bce_dma_alloc_exit;
2467 		}
2468 
2469 		map_arg.maxsegs = 1;
2470 		map_arg.sc = sc;
2471 
2472 		error = bus_dmamap_load(
2473 				sc->tx_bd_chain_tag,	 /* dmat        */
2474 	    		sc->tx_bd_chain_map[i],	 /* map         */
2475 	    		sc->tx_bd_chain[i],		 /* buf         */
2476 		    	BCE_TX_CHAIN_PAGE_SZ,  	 /* buflen      */
2477 		    	bce_dma_map_addr, 	   	 /* callback    */
2478 	    		&map_arg, 			   	 /* callbackarg */
2479 	    		BUS_DMA_NOWAIT);	   	 /* flags       */
2480 
2481 		if(error || (map_arg.maxsegs == 0)) {
2482 			BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2483 				__FILE__, __LINE__);
2484 			rc = ENOMEM;
2485 			goto bce_dma_alloc_exit;
2486 		}
2487 
2488 		sc->tx_bd_chain_paddr[i] = map_arg.busaddr;
2489 		/* DRC - Fix for 64 bit systems. */
2490 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2491 			i, (u32) sc->tx_bd_chain_paddr[i]);
2492 	}
2493 
2494 	/* Create a DMA tag for TX mbufs. */
2495 	if (bus_dma_tag_create(
2496 			sc->parent_tag,	 	 	/* parent      */
2497 	    	BCE_DMA_ALIGN,	 		/* alignment   */
2498 	    	BCE_DMA_BOUNDARY, 		/* boundary    */
2499 			sc->max_bus_addr,		/* lowaddr     */
2500 			BUS_SPACE_MAXADDR,		/* highaddr    */
2501 			NULL, 			  		/* filterfunc  */
2502 			NULL, 			  		/* filterarg   */
2503 			MCLBYTES * BCE_MAX_SEGMENTS,	/* maxsize     */
2504 			BCE_MAX_SEGMENTS,  		/* nsegments   */
2505 			MCLBYTES,				/* maxsegsize  */
2506 			0,				 		/* flags       */
2507 			NULL, 			  		/* lockfunc    */
2508 			NULL,			  		/* lockarg     */
2509 	    	&sc->tx_mbuf_tag)) {
2510 		BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2511 			__FILE__, __LINE__);
2512 		rc = ENOMEM;
2513 		goto bce_dma_alloc_exit;
2514 	}
2515 
2516 	/* Create DMA maps for the TX mbufs clusters. */
2517 	for (i = 0; i < TOTAL_TX_BD; i++) {
2518 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2519 			&sc->tx_mbuf_map[i])) {
2520 			BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2521 				__FILE__, __LINE__);
2522 			rc = ENOMEM;
2523 			goto bce_dma_alloc_exit;
2524 		}
2525 	}
2526 
2527 	/*
2528 	 * Create a DMA tag for the RX buffer descriptor chain,
2529 	 * allocate and clear the  memory, and fetch the physical
2530 	 * address of the blocks.
2531 	 */
2532 	if (bus_dma_tag_create(
2533 			sc->parent_tag,			/* parent      */
2534 	    	BCM_PAGE_SIZE,			/* alignment   */
2535 	    	BCE_DMA_BOUNDARY,		/* boundary    */
2536 			BUS_SPACE_MAXADDR,		/* lowaddr     */
2537 			sc->max_bus_addr,		/* lowaddr     */
2538 			NULL,					/* filter      */
2539 			NULL, 					/* filterarg   */
2540 			BCE_RX_CHAIN_PAGE_SZ,	/* maxsize     */
2541 			1, 						/* nsegments   */
2542 			BCE_RX_CHAIN_PAGE_SZ,	/* maxsegsize  */
2543 			0,				 		/* flags       */
2544 			NULL,					/* lockfunc    */
2545 			NULL,					/* lockarg     */
2546 			&sc->rx_bd_chain_tag)) {
2547 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2548 			__FILE__, __LINE__);
2549 		rc = ENOMEM;
2550 		goto bce_dma_alloc_exit;
2551 	}
2552 
2553 	for (i = 0; i < RX_PAGES; i++) {
2554 
2555 		if (bus_dmamem_alloc(
2556 				sc->rx_bd_chain_tag,			/* tag   */
2557 	    		(void **)&sc->rx_bd_chain[i], 	/* vaddr */
2558 	    		BUS_DMA_NOWAIT,				  	/* flags */
2559 		    	&sc->rx_bd_chain_map[i])) {
2560 			BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2561 				"DMA memory!\n", __FILE__, __LINE__);
2562 			rc = ENOMEM;
2563 			goto bce_dma_alloc_exit;
2564 		}
2565 
2566 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2567 
2568 		map_arg.maxsegs = 1;
2569 		map_arg.sc = sc;
2570 
2571 		error = bus_dmamap_load(
2572 				sc->rx_bd_chain_tag,	/* dmat        */
2573 	    		sc->rx_bd_chain_map[i],	/* map         */
2574 	    		sc->rx_bd_chain[i],		/* buf         */
2575 		    	BCE_RX_CHAIN_PAGE_SZ,  	/* buflen      */
2576 		    	bce_dma_map_addr,	   	/* callback    */
2577 	    		&map_arg,			   	/* callbackarg */
2578 	    		BUS_DMA_NOWAIT);		/* flags       */
2579 
2580 		if(error || (map_arg.maxsegs == 0)) {
2581 			BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2582 				__FILE__, __LINE__);
2583 			rc = ENOMEM;
2584 			goto bce_dma_alloc_exit;
2585 		}
2586 
2587 		sc->rx_bd_chain_paddr[i] = map_arg.busaddr;
2588 		/* DRC - Fix for 64 bit systems. */
2589 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2590 			i, (u32) sc->rx_bd_chain_paddr[i]);
2591 	}
2592 
2593 	/*
2594 	 * Create a DMA tag for RX mbufs.
2595 	 */
2596 	if (bus_dma_tag_create(
2597 			sc->parent_tag,			/* parent      */
2598 	    	BCE_DMA_ALIGN,		  	/* alignment   */
2599 	    	BCE_DMA_BOUNDARY,	  	/* boundary    */
2600 			sc->max_bus_addr,	  	/* lowaddr     */
2601 			BUS_SPACE_MAXADDR, 	  	/* highaddr    */
2602 			NULL, 				  	/* filterfunc  */
2603 			NULL, 				  	/* filterarg   */
2604 			MJUM9BYTES,				/* maxsize     */
2605 			BCE_MAX_SEGMENTS,  		/* nsegments   */
2606 			MJUM9BYTES,				/* maxsegsize  */
2607 			0,				 	  	/* flags       */
2608 			NULL, 				  	/* lockfunc    */
2609 			NULL,				  	/* lockarg     */
2610 	    	&sc->rx_mbuf_tag)) {
2611 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2612 			__FILE__, __LINE__);
2613 		rc = ENOMEM;
2614 		goto bce_dma_alloc_exit;
2615 	}
2616 
2617 	/* Create DMA maps for the RX mbuf clusters. */
2618 	for (i = 0; i < TOTAL_RX_BD; i++) {
2619 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2620 				&sc->rx_mbuf_map[i])) {
2621 			BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2622 				__FILE__, __LINE__);
2623 			rc = ENOMEM;
2624 			goto bce_dma_alloc_exit;
2625 		}
2626 	}
2627 
2628 bce_dma_alloc_exit:
2629 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2630 
2631 	return(rc);
2632 }
2633 
2634 
2635 /****************************************************************************/
2636 /* Release all resources used by the driver.                                */
2637 /*                                                                          */
2638 /* Releases all resources acquired by the driver including interrupts,      */
2639 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2640 /*                                                                          */
2641 /* Returns:                                                                 */
2642 /*   Nothing.                                                               */
2643 /****************************************************************************/
2644 static void
2645 bce_release_resources(struct bce_softc *sc)
2646 {
2647 	device_t dev;
2648 
2649 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2650 
2651 	dev = sc->bce_dev;
2652 
2653 	bce_dma_free(sc);
2654 
2655 	if (sc->bce_intrhand != NULL)
2656 		bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2657 
2658 	if (sc->bce_irq != NULL)
2659 		bus_release_resource(dev,
2660 			SYS_RES_IRQ,
2661 			0,
2662 			sc->bce_irq);
2663 
2664 	if (sc->bce_res != NULL)
2665 		bus_release_resource(dev,
2666 			SYS_RES_MEMORY,
2667 		    PCIR_BAR(0),
2668 		    sc->bce_res);
2669 
2670 	if (sc->bce_ifp != NULL)
2671 		if_free(sc->bce_ifp);
2672 
2673 
2674 	if (mtx_initialized(&sc->bce_mtx))
2675 		BCE_LOCK_DESTROY(sc);
2676 
2677 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2678 
2679 }
2680 
2681 
2682 /****************************************************************************/
2683 /* Firmware synchronization.                                                */
2684 /*                                                                          */
2685 /* Before performing certain events such as a chip reset, synchronize with  */
2686 /* the firmware first.                                                      */
2687 /*                                                                          */
2688 /* Returns:                                                                 */
2689 /*   0 for success, positive value for failure.                             */
2690 /****************************************************************************/
2691 static int
2692 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2693 {
2694 	int i, rc = 0;
2695 	u32 val;
2696 
2697 	/* Don't waste any time if we've timed out before. */
2698 	if (sc->bce_fw_timed_out) {
2699 		rc = EBUSY;
2700 		goto bce_fw_sync_exit;
2701 	}
2702 
2703 	/* Increment the message sequence number. */
2704 	sc->bce_fw_wr_seq++;
2705 	msg_data |= sc->bce_fw_wr_seq;
2706 
2707  	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2708 
2709 	/* Send the message to the bootcode driver mailbox. */
2710 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2711 
2712 	/* Wait for the bootcode to acknowledge the message. */
2713 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2714 		/* Check for a response in the bootcode firmware mailbox. */
2715 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2716 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2717 			break;
2718 		DELAY(1000);
2719 	}
2720 
2721 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2722 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2723 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2724 
2725 		BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2726 			"msg_data = 0x%08X\n",
2727 			__FILE__, __LINE__, msg_data);
2728 
2729 		msg_data &= ~BCE_DRV_MSG_CODE;
2730 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2731 
2732 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2733 
2734 		sc->bce_fw_timed_out = 1;
2735 		rc = EBUSY;
2736 	}
2737 
2738 bce_fw_sync_exit:
2739 	return (rc);
2740 }
2741 
2742 
2743 /****************************************************************************/
2744 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2745 /*                                                                          */
2746 /* Returns:                                                                 */
2747 /*   Nothing.                                                               */
2748 /****************************************************************************/
2749 static void
2750 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2751 	u32 rv2p_code_len, u32 rv2p_proc)
2752 {
2753 	int i;
2754 	u32 val;
2755 
2756 	for (i = 0; i < rv2p_code_len; i += 8) {
2757 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2758 		rv2p_code++;
2759 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2760 		rv2p_code++;
2761 
2762 		if (rv2p_proc == RV2P_PROC1) {
2763 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2764 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2765 		}
2766 		else {
2767 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2768 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2769 		}
2770 	}
2771 
2772 	/* Reset the processor, un-stall is done later. */
2773 	if (rv2p_proc == RV2P_PROC1) {
2774 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2775 	}
2776 	else {
2777 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2778 	}
2779 }
2780 
2781 
2782 /****************************************************************************/
2783 /* Load RISC processor firmware.                                            */
2784 /*                                                                          */
2785 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2786 /* associated with a particular processor.                                  */
2787 /*                                                                          */
2788 /* Returns:                                                                 */
2789 /*   Nothing.                                                               */
2790 /****************************************************************************/
2791 static void
2792 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2793 	struct fw_info *fw)
2794 {
2795 	u32 offset;
2796 	u32 val;
2797 
2798 	/* Halt the CPU. */
2799 	val = REG_RD_IND(sc, cpu_reg->mode);
2800 	val |= cpu_reg->mode_value_halt;
2801 	REG_WR_IND(sc, cpu_reg->mode, val);
2802 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2803 
2804 	/* Load the Text area. */
2805 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2806 	if (fw->text) {
2807 		int j;
2808 
2809 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2810 			REG_WR_IND(sc, offset, fw->text[j]);
2811 	        }
2812 	}
2813 
2814 	/* Load the Data area. */
2815 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2816 	if (fw->data) {
2817 		int j;
2818 
2819 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2820 			REG_WR_IND(sc, offset, fw->data[j]);
2821 		}
2822 	}
2823 
2824 	/* Load the SBSS area. */
2825 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2826 	if (fw->sbss) {
2827 		int j;
2828 
2829 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2830 			REG_WR_IND(sc, offset, fw->sbss[j]);
2831 		}
2832 	}
2833 
2834 	/* Load the BSS area. */
2835 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2836 	if (fw->bss) {
2837 		int j;
2838 
2839 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2840 			REG_WR_IND(sc, offset, fw->bss[j]);
2841 		}
2842 	}
2843 
2844 	/* Load the Read-Only area. */
2845 	offset = cpu_reg->spad_base +
2846 		(fw->rodata_addr - cpu_reg->mips_view_base);
2847 	if (fw->rodata) {
2848 		int j;
2849 
2850 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2851 			REG_WR_IND(sc, offset, fw->rodata[j]);
2852 		}
2853 	}
2854 
2855 	/* Clear the pre-fetch instruction. */
2856 	REG_WR_IND(sc, cpu_reg->inst, 0);
2857 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2858 
2859 	/* Start the CPU. */
2860 	val = REG_RD_IND(sc, cpu_reg->mode);
2861 	val &= ~cpu_reg->mode_value_halt;
2862 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2863 	REG_WR_IND(sc, cpu_reg->mode, val);
2864 }
2865 
2866 
2867 /****************************************************************************/
2868 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2869 /*                                                                          */
2870 /* Loads the firmware for each CPU and starts the CPU.                      */
2871 /*                                                                          */
2872 /* Returns:                                                                 */
2873 /*   Nothing.                                                               */
2874 /****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * NOTE: cpu_reg and fw are reused for each processor below; every
	 * field is rewritten before each bce_load_cpu_fw() call, so no
	 * stale values leak between sections.
	 */

	/* Initialize the RV2P processor. */
	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000; /* same base for all CPUs below */

	fw.ver_major = bce_RXP_b06FwReleaseMajor;
	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
	fw.ver_fix = bce_RXP_b06FwReleaseFix;
	fw.start_addr = bce_RXP_b06FwStartAddr;

	fw.text_addr = bce_RXP_b06FwTextAddr;
	fw.text_len = bce_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_RXP_b06FwText;

	fw.data_addr = bce_RXP_b06FwDataAddr;
	fw.data_len = bce_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_RXP_b06FwData;

	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
	fw.sbss_len = bce_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_RXP_b06FwSbss;

	fw.bss_addr = bce_RXP_b06FwBssAddr;
	fw.bss_len = bce_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_RXP_b06FwBss;

	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
	fw.rodata_len = bce_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_RXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TXP_b06FwReleaseMajor;
	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
	fw.ver_fix = bce_TXP_b06FwReleaseFix;
	fw.start_addr = bce_TXP_b06FwStartAddr;

	fw.text_addr = bce_TXP_b06FwTextAddr;
	fw.text_len = bce_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TXP_b06FwText;

	fw.data_addr = bce_TXP_b06FwDataAddr;
	fw.data_len = bce_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TXP_b06FwData;

	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
	fw.sbss_len = bce_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TXP_b06FwSbss;

	fw.bss_addr = bce_TXP_b06FwBssAddr;
	fw.bss_len = bce_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TXP_b06FwBss;

	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
	fw.rodata_len = bce_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
	fw.start_addr = bce_TPAT_b06FwStartAddr;

	fw.text_addr = bce_TPAT_b06FwTextAddr;
	fw.text_len = bce_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TPAT_b06FwText;

	fw.data_addr = bce_TPAT_b06FwDataAddr;
	fw.data_len = bce_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TPAT_b06FwData;

	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
	fw.sbss_len = bce_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TPAT_b06FwSbss;

	fw.bss_addr = bce_TPAT_b06FwBssAddr;
	fw.bss_len = bce_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TPAT_b06FwBss;

	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
	fw.rodata_len = bce_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TPAT_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_COM_b06FwReleaseMajor;
	fw.ver_minor = bce_COM_b06FwReleaseMinor;
	fw.ver_fix = bce_COM_b06FwReleaseFix;
	fw.start_addr = bce_COM_b06FwStartAddr;

	fw.text_addr = bce_COM_b06FwTextAddr;
	fw.text_len = bce_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_COM_b06FwText;

	fw.data_addr = bce_COM_b06FwDataAddr;
	fw.data_len = bce_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_COM_b06FwData;

	fw.sbss_addr = bce_COM_b06FwSbssAddr;
	fw.sbss_len = bce_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_COM_b06FwSbss;

	fw.bss_addr = bce_COM_b06FwBssAddr;
	fw.bss_len = bce_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_COM_b06FwBss;

	fw.rodata_addr = bce_COM_b06FwRodataAddr;
	fw.rodata_len = bce_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_COM_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
}
3073 
3074 
3075 /****************************************************************************/
3076 /* Initialize context memory.                                               */
3077 /*                                                                          */
3078 /* Clears the memory associated with each Context ID (CID).                 */
3079 /*                                                                          */
3080 /* Returns:                                                                 */
3081 /*   Nothing.                                                               */
3082 /****************************************************************************/
3083 static void
3084 bce_init_context(struct bce_softc *sc)
3085 {
3086 	u32 vcid;
3087 
3088 	vcid = 96;
3089 	while (vcid) {
3090 		u32 vcid_addr, pcid_addr, offset;
3091 
3092 		vcid--;
3093 
3094    		vcid_addr = GET_CID_ADDR(vcid);
3095 		pcid_addr = vcid_addr;
3096 
3097 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3098 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3099 
3100 		/* Zero out the context. */
3101 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3102 			CTX_WR(sc, 0x00, offset, 0);
3103 		}
3104 
3105 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3106 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3107 	}
3108 }
3109 
3110 
3111 /****************************************************************************/
3112 /* Fetch the permanent MAC address of the controller.                       */
3113 /*                                                                          */
3114 /* Returns:                                                                 */
3115 /*   Nothing.                                                               */
3116 /****************************************************************************/
3117 static void
3118 bce_get_mac_addr(struct bce_softc *sc)
3119 {
3120 	u32 mac_lo = 0, mac_hi = 0;
3121 
3122 	/*
3123 	 * The NetXtreme II bootcode populates various NIC
3124 	 * power-on and runtime configuration items in a
3125 	 * shared memory area.  The factory configured MAC
3126 	 * address is available from both NVRAM and the
3127 	 * shared memory area so we'll read the value from
3128 	 * shared memory for speed.
3129 	 */
3130 
3131 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3132 		BCE_PORT_HW_CFG_MAC_UPPER);
3133 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3134 		BCE_PORT_HW_CFG_MAC_LOWER);
3135 
3136 	if ((mac_lo == 0) && (mac_hi == 0)) {
3137 		BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3138 			__FILE__, __LINE__);
3139 	} else {
3140 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3141 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3142 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3143 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3144 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3145 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3146 	}
3147 
3148 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3149 }
3150 
3151 
3152 /****************************************************************************/
3153 /* Program the MAC address.                                                 */
3154 /*                                                                          */
3155 /* Returns:                                                                 */
3156 /*   Nothing.                                                               */
3157 /****************************************************************************/
3158 static void
3159 bce_set_mac_addr(struct bce_softc *sc)
3160 {
3161 	u32 val;
3162 	u8 *mac_addr = sc->eaddr;
3163 
3164 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3165 
3166 	val = (mac_addr[0] << 8) | mac_addr[1];
3167 
3168 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3169 
3170 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3171 		(mac_addr[4] << 8) | mac_addr[5];
3172 
3173 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3174 }
3175 
3176 
3177 /****************************************************************************/
3178 /* Stop the controller.                                                     */
3179 /*                                                                          */
3180 /* Returns:                                                                 */
3181 /*   Nothing.                                                               */
3182 /****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* NOTE(review): assumes device_get_softc() tolerates the detach
	 * path here (see the mii NULL check below) -- confirm. */
	mii = device_get_softc(sc->bce_miibus);

	/* Cancel the periodic callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	/* Read back to flush the write before the settling delay. */
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */

	/* Temporarily force IFF_UP so the media change takes effect. */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	/*
	 * If we are called from bce_detach(), mii is already NULL.
	 */
	if (mii != NULL) {
		/* Save the current media, force IFM_NONE, then restore. */
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
	}

	ifp->if_flags = itmp;
	ifp->if_timer = 0;

	sc->bce_link = 0;

	/* Mark the interface as no longer running. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

}
3246 
3247 
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Notifies the bootcode, issues a core reset, verifies byte swapping, and  */
/* waits for the firmware to finish its initialization.                     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Poll for reset completion (up to 10 iterations of 10us each). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BCE_PRINTF(sc, "%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bce_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bce_reset_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3328 
3329 
3330 static int
3331 bce_chipinit(struct bce_softc *sc)
3332 {
3333 	u32 val;
3334 	int rc = 0;
3335 
3336 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3337 
3338 	/* Make sure the interrupt is not active. */
3339 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3340 
3341 	/* Initialize DMA byte/word swapping, configure the number of DMA  */
3342 	/* channels and PCI clock compensation delay.                      */
3343 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3344 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3345 #if BYTE_ORDER == BIG_ENDIAN
3346 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3347 #endif
3348 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3349 	      DMA_READ_CHANS << 12 |
3350 	      DMA_WRITE_CHANS << 16;
3351 
3352 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3353 
3354 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3355 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3356 
3357 	/*
3358 	 * This setting resolves a problem observed on certain Intel PCI
3359 	 * chipsets that cannot handle multiple outstanding DMA operations.
3360 	 * See errata E9_5706A1_65.
3361 	 */
3362 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3363 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3364 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3365 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3366 
3367 	REG_WR(sc, BCE_DMA_CONFIG, val);
3368 
3369 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3370 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3371 		u16 val;
3372 
3373 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3374 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3375 	}
3376 
3377 	/* Enable the RX_V2P and Context state machines before access. */
3378 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3379 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3380 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3381 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3382 
3383 	/* Initialize context mapping and zero out the quick contexts. */
3384 	bce_init_context(sc);
3385 
3386 	/* Initialize the on-boards CPUs */
3387 	bce_init_cpus(sc);
3388 
3389 	/* Prepare NVRAM for access. */
3390 	if (bce_init_nvram(sc)) {
3391 		rc = ENODEV;
3392 		goto bce_chipinit_exit;
3393 	}
3394 
3395 	/* Set the kernel bypass block size */
3396 	val = REG_RD(sc, BCE_MQ_CONFIG);
3397 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3398 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3399 	REG_WR(sc, BCE_MQ_CONFIG, val);
3400 
3401 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3402 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3403 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3404 
3405 	val = (BCM_PAGE_BITS - 8) << 24;
3406 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3407 
3408 	/* Configure page size. */
3409 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3410 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3411 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3412 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3413 
3414 bce_chipinit_exit:
3415 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3416 
3417 	return(rc);
3418 }
3419 
3420 
3421 /****************************************************************************/
3422 /* Initialize the controller in preparation to send/receive traffic.        */
3423 /*                                                                          */
3424 /* Returns:                                                                 */
3425 /*   0 for success, positive value for failure.                             */
3426 /****************************************************************************/
3427 static int
3428 bce_blockinit(struct bce_softc *sc)
3429 {
3430 	u32 reg, val;
3431 	int rc = 0;
3432 
3433 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3434 
3435 	/* Load the hardware default MAC address. */
3436 	bce_set_mac_addr(sc);
3437 
3438 	/* Set the Ethernet backoff seed value */
3439 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3440 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3441 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3442 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3443 
3444 	sc->last_status_idx = 0;
3445 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3446 
3447 	/* Set up link change interrupt generation. */
3448 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3449 
3450 	/* Program the physical address of the status block. */
3451 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3452 		BCE_ADDR_LO(sc->status_block_paddr));
3453 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3454 		BCE_ADDR_HI(sc->status_block_paddr));
3455 
3456 	/* Program the physical address of the statistics block. */
3457 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3458 		BCE_ADDR_LO(sc->stats_block_paddr));
3459 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3460 		BCE_ADDR_HI(sc->stats_block_paddr));
3461 
3462 	/* Program various host coalescing parameters. */
3463 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3464 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3465 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3466 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3467 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3468 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3469 	REG_WR(sc, BCE_HC_TX_TICKS,
3470 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3471 	REG_WR(sc, BCE_HC_RX_TICKS,
3472 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3473 	REG_WR(sc, BCE_HC_COM_TICKS,
3474 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3475 	REG_WR(sc, BCE_HC_CMD_TICKS,
3476 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3477 	REG_WR(sc, BCE_HC_STATS_TICKS,
3478 		(sc->bce_stats_ticks & 0xffff00));
3479 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3480 		0xbb8);  /* 3ms */
3481 	REG_WR(sc, BCE_HC_CONFIG,
3482 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3483 		BCE_HC_CONFIG_COLLECT_STATS));
3484 
3485 	/* Clear the internal statistics counters. */
3486 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3487 
3488 	/* Verify that bootcode is running. */
3489 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3490 
3491 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3492 		BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3493 			__FILE__, __LINE__);
3494 		reg = 0);
3495 
3496 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3497 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3498 		BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3499 			"Expected: 08%08X\n", __FILE__, __LINE__,
3500 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3501 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3502 		rc = ENODEV;
3503 		goto bce_blockinit_exit;
3504 	}
3505 
3506 	/* Check if any management firmware is running. */
3507 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3508 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3509 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3510 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3511 	}
3512 
3513 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3514 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3515 
3516 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3517 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3518 
3519 	/* Enable link state change interrupt generation. */
3520 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3521 
3522 	/* Enable all remaining blocks in the MAC. */
3523 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3524 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3525 	DELAY(20);
3526 
3527 bce_blockinit_exit:
3528 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3529 
3530 	return (rc);
3531 }
3532 
3533 
3534 /****************************************************************************/
3535 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3536 /*                                                                          */
3537 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3538 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3539 /* necessary.                                                               */
3540 /*                                                                          */
3541 /* Returns:                                                                 */
3542 /*   0 for success, positive value for failure.                             */
3543 /****************************************************************************/
static int
bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
	u32 *prod_bseq)
{
	bus_dmamap_t		map;
	/*
	 * NOTE(review): segs[] caps a cluster at 4 DMA segments; presumably
	 * the rx_mbuf_tag was created with nsegments <= 4 -- confirm against
	 * the tag creation in bce_dma_alloc().
	 */
	bus_dma_segment_t	segs[4];
	struct mbuf *m_new = NULL;
	struct rx_bd		*rxbd;
	int i, nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	/* m == NULL requests a fresh mbuf; otherwise the caller's mbuf is recycled. */
	if (m == NULL) {

		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
				__FILE__, __LINE__);
			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
				__FILE__, __LINE__);

			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);
		/* Attach a cluster; M_EXT is only set on success, checked below. */
		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
		if (!(m_new->m_flags & M_EXT)) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
				__FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
	} else {
		/* Recycle the caller's mbuf: reset its length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	if (error) {
		BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
			__FILE__, __LINE__);

		/* Frees the whole chain even when recycling a caller-supplied mbuf. */
		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_buf_exit;
	}

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n",
			__FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
	/* The byte sequence counter advances by every byte posted to the chip. */
	*prod_bseq += segs[0].ds_len;

	/* Chain any additional DMA segments into subsequent rx_bd entries. */
	for (i = 1; i < nsegs; i++) {

		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		rxbd->rx_bd_len       = htole32(segs[i].ds_len);
		rxbd->rx_bd_flags     = 0;
		*prod_bseq += segs[i].ds_len;
	}

	/* Mark the final rx_bd; bce_rx_intr() relies on this to find the mbuf. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
3679 
3680 
3681 /****************************************************************************/
3682 /* Allocate memory and initialize the TX data structures.                   */
3683 /*                                                                          */
3684 /* Returns:                                                                 */
3685 /*   0 for success, positive value for failure.                             */
3686 /****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod        = 0;
	sc->tx_cons        = 0;
	sc->tx_prod_bseq   = 0;
	sc->used_tx_bd = 0;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The last entry of each page holds the next-page pointer. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	/*
	 * Initialize the context ID for an L2 TX chain.
	 */
	val = BCE_L2CTX_TYPE_TYPE_L2;
	val |= BCE_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);

	/*
	 * NOTE(review): (8 << 16) presumably sets the BD pre-read count in
	 * the command type field -- confirm against the controller
	 * programmer's reference.
	 */
	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3751 
3752 
3753 /****************************************************************************/
3754 /* Free memory and clear the TX data structures.                            */
3755 /*                                                                          */
3756 /* Returns:                                                                 */
3757 /*   Nothing.                                                               */
3758 /****************************************************************************/
3759 static void
3760 bce_free_tx_chain(struct bce_softc *sc)
3761 {
3762 	int i;
3763 
3764 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3765 
3766 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3767 	for (i = 0; i < TOTAL_TX_BD; i++) {
3768 		if (sc->tx_mbuf_ptr[i] != NULL) {
3769 			if (sc->tx_mbuf_map != NULL)
3770 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3771 					BUS_DMASYNC_POSTWRITE);
3772 			m_freem(sc->tx_mbuf_ptr[i]);
3773 			sc->tx_mbuf_ptr[i] = NULL;
3774 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3775 		}
3776 	}
3777 
3778 	/* Clear each TX chain page. */
3779 	for (i = 0; i < TX_PAGES; i++)
3780 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3781 
3782 	/* Check if we lost any mbufs in the process. */
3783 	DBRUNIF((sc->tx_mbuf_alloc),
3784 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3785 			"from tx chain!\n",
3786 			__FILE__, __LINE__, sc->tx_mbuf_alloc));
3787 
3788 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3789 }
3790 
3791 
3792 /****************************************************************************/
3793 /* Allocate memory and initialize the RX data structures.                   */
3794 /*                                                                          */
3795 /* Returns:                                                                 */
3796 /*   0 for success, positive value for failure.                             */
3797 /****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	u16 prod, chain_prod;
	u32 prod_bseq, val;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod        = 0;
	sc->rx_cons        = 0;
	sc->rx_prod_bseq   = 0;
	sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The last entry of each page holds the next-page pointer. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
	/*
	 * NOTE(review): (0x02 << 8) presumably sets a context field such as
	 * the BD pre-read count -- confirm against the controller
	 * programmer's reference.
	 */
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < BCE_RX_SLACK_SPACE) {
		chain_prod = RX_CHAIN_IDX(prod);
		if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
			BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod      = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Flush the rx_bd writes to memory before handing them to the chip. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(
			sc->rx_bd_chain_tag,
	    	sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3878 
3879 
3880 /****************************************************************************/
3881 /* Free memory and clear the RX data structures.                            */
3882 /*                                                                          */
3883 /* Returns:                                                                 */
3884 /*   Nothing.                                                               */
3885 /****************************************************************************/
3886 static void
3887 bce_free_rx_chain(struct bce_softc *sc)
3888 {
3889 	int i;
3890 
3891 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3892 
3893 	/* Free any mbufs still in the RX mbuf chain. */
3894 	for (i = 0; i < TOTAL_RX_BD; i++) {
3895 		if (sc->rx_mbuf_ptr[i] != NULL) {
3896 			if (sc->rx_mbuf_map[i] != NULL)
3897 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3898 					BUS_DMASYNC_POSTREAD);
3899 			m_freem(sc->rx_mbuf_ptr[i]);
3900 			sc->rx_mbuf_ptr[i] = NULL;
3901 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3902 		}
3903 	}
3904 
3905 	/* Clear each RX chain page. */
3906 	for (i = 0; i < RX_PAGES; i++)
3907 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3908 
3909 	/* Check if we lost any mbufs in the process. */
3910 	DBRUNIF((sc->rx_mbuf_alloc),
3911 		BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3912 			__FILE__, __LINE__, sc->rx_mbuf_alloc));
3913 
3914 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3915 }
3916 
3917 
3918 /****************************************************************************/
3919 /* Set media options.                                                       */
3920 /*                                                                          */
3921 /* Returns:                                                                 */
3922 /*   0 for success, positive value for failure.                             */
3923 /****************************************************************************/
3924 static int
3925 bce_ifmedia_upd(struct ifnet *ifp)
3926 {
3927 	struct bce_softc *sc;
3928 	struct mii_data *mii;
3929 	struct ifmedia *ifm;
3930 	int rc = 0;
3931 
3932 	sc = ifp->if_softc;
3933 	ifm = &sc->bce_ifmedia;
3934 
3935 	/* DRC - ToDo: Add SerDes support. */
3936 
3937 	mii = device_get_softc(sc->bce_miibus);
3938 	sc->bce_link = 0;
3939 	if (mii->mii_instance) {
3940 		struct mii_softc *miisc;
3941 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3942 		    miisc = LIST_NEXT(miisc, mii_list))
3943 			mii_phy_reset(miisc);
3944 	}
3945 	mii_mediachg(mii);
3946 
3947 	return(rc);
3948 }
3949 
3950 
3951 /****************************************************************************/
3952 /* Reports current media status.                                            */
3953 /*                                                                          */
3954 /* Returns:                                                                 */
3955 /*   Nothing.                                                               */
3956 /****************************************************************************/
3957 static void
3958 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3959 {
3960 	struct bce_softc *sc;
3961 	struct mii_data *mii;
3962 
3963 	sc = ifp->if_softc;
3964 
3965 	BCE_LOCK(sc);
3966 
3967 	mii = device_get_softc(sc->bce_miibus);
3968 
3969 	/* DRC - ToDo: Add SerDes support. */
3970 
3971 	mii_pollstat(mii);
3972 	ifmr->ifm_active = mii->mii_media_active;
3973 	ifmr->ifm_status = mii->mii_media_status;
3974 
3975 	BCE_UNLOCK(sc);
3976 }
3977 
3978 
3979 /****************************************************************************/
3980 /* Handles PHY generated interrupt events.                                  */
3981 /*                                                                          */
3982 /* Returns:                                                                 */
3983 /*   Nothing.                                                               */
3984 /****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	/*
	 * The attention bits carry the chip's current link state while the
	 * "ack" bits hold the state as of the last acknowledgement; a
	 * mismatch indicates the link changed since it was last processed.
	 */
	new_link_state = sc->status_block->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {

		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Clear the cached link state and re-evaluate it via the tick routine. */
		sc->bce_link = 0;
		callout_stop(&sc->bce_stat_ch);
		bce_tick_locked(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			/* Set the ack bit so this change isn't reported again. */
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
		}
		else {
			/* Clear the ack bit to match the new (down) state. */
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
		}

	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
4021 
4022 
4023 /****************************************************************************/
4024 /* Handles received frame interrupt events.                                 */
4025 /*                                                                          */
4026 /* Returns:                                                                 */
4027 /*   Nothing.                                                               */
4028 /****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	u32 sw_prod_bseq;
	struct l2_fhdr *l2fhdr;

	DBRUNIF(1, sc->rx_interrupts++);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the next-page pointer entry at the end of a chain page. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
		__FUNCTION__, sw_prod, sw_cons,
		sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u32 status;

		/* Convert the producer/consumer indices to an actual rx_bd index. */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_rxbd(sc, sw_chain_cons, rxbd));

#ifdef DEVICE_POLLING
		/*
		 * NOTE(review): this break runs after free_rx_bd++ but before
		 * sw_cons advances, so the same bd is recounted on the next
		 * polling pass -- confirm free_rx_bd accounting under polling.
		 */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->bce_rxcycles <= 0)
				break;
			sc->bce_rxcycles--;
		}
#endif

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {

			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_chain_cons);
				bce_breakpoint(sc));

			/* DRC - ToDo: If the received packet is small, say less */
			/*             than 128 bytes, allocate a new mbuf here, */
			/*             copy the data to that mbuf, and recycle   */
			/*             the mapped jumbo frame.                   */

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
		    	BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len    = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
				BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
				status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
				BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
					"Min(%d), Actual(%d), Max(%d)\n",
					__FILE__, __LINE__, (int) BCE_MIN_MTU,
					len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
				bce_dump_mbuf(sc, m);
		 		bce_breakpoint(sc));

			/* Trim the trailing Ethernet CRC left on the frame by the chip. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
				L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
				L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {

				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

				DBRUN(BCE_WARN,
					BCE_PRINTF(sc, "%s(%d): Failed to allocate "
					"new mbuf, incoming frame dropped!\n",
					__FILE__, __LINE__));

				ifp->if_ierrors++;

				/* Try and reuse the exisitng mbuf. */
				if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {

					DBRUNIF(1, bce_breakpoint(sc));
					panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);

				}
				goto bce_rx_int_next_rx;
			}

			/* Skip over the l2_fhdr when passing the data up the stack. */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
				struct ether_header *eh;
				eh = mtod(m, struct ether_header *);
				BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
					__FUNCTION__, eh->ether_dhost, ":",
					eh->ether_shost, ":", htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {

				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
					else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid IP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
						      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
							| CSUM_PSEUDO_HDR);
					} else
						DBPRINT(sc, BCE_WARN_SEND,
							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
							__FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}


			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
					__FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
#if __FreeBSD_version < 700000
				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
#else
				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag);
				if (m == NULL)
					continue;
#endif
			}

			/* Pass the mbuf off to the upper layers. */
			ifp->if_ipackets++;
			DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
				__FUNCTION__);
			/* Drop the lock across if_input() to avoid recursion into the driver. */
			BCE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);
			BCE_LOCK(sc);

bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Hand the rx_bd chain pages back to the device. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

	/* Commit the working copies of the indices back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Tell the chip how many new rx_bd's are available. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4303 
4304 
4305 /****************************************************************************/
4306 /* Handles transmit completion interrupt events.                            */
4307 /*                                                                          */
4308 /* Returns:                                                                 */
4309 /*   Nothing.                                                               */
4310 /****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	/* The caller (bce_intr()/bce_poll_locked()) must hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		/* Convert the 16-bit ring index into a chain array index. */
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
				" 0x%04X > 0x%04X\n",
				__FILE__, __LINE__, sw_tx_chain_cons,
				(int) MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1,
			txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				[TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
				__FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
			BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
				BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
				"txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
				BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			/* One reclaimed mbuf == one transmitted packet. */
			ifp->if_opackets++;
		}

		/* Release this descriptor and advance the software consumer. */
		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
			BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n",
				__FUNCTION__, sc->used_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	/* Publish the new software consumer index. */
	sc->tx_cons = sw_tx_cons;
}
4422 
4423 
4424 /****************************************************************************/
4425 /* Disables interrupt generation.                                           */
4426 /*                                                                          */
4427 /* Returns:                                                                 */
4428 /*   Nothing.                                                               */
4429 /****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Mask further interrupt generation by the controller. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the preceding write to the device. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
}
4437 
4438 
4439 /****************************************************************************/
4440 /* Enables interrupt generation.                                            */
4441 /*                                                                          */
4442 /* Returns:                                                                 */
4443 /*   Nothing.                                                               */
4444 /****************************************************************************/
4445 static void
4446 bce_enable_intr(struct bce_softc *sc)
4447 {
4448 	u32 val;
4449 
4450 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4451 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4452 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4453 
4454 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4455 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4456 
4457 	val = REG_RD(sc, BCE_HC_COMMAND);
4458 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4459 }
4460 
4461 
4462 /****************************************************************************/
4463 /* Handles controller initialization.                                       */
4464 /*                                                                          */
4465 /* Must be called from a locked routine.                                    */
4466 /*                                                                          */
4467 /* Returns:                                                                 */
4468 /*   Nothing.                                                               */
4469 /****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* The caller must hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	/* Quiesce the controller before reprogramming it. */
	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n",
			__FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
		ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu |
			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/*
	 * Calculate the RX Ethernet frame size for rx_bd's.
	 * NOTE(review): the "+ 2" and "+ 8" padding constants presumably
	 * cover frame alignment and trailing bytes DMA'd after the
	 * l2_fhdr -- confirm against the controller documentation.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		bce_disable_intr(sc);

		/*
		 * NOTE(review): writing 1 to the upper 16 bits appears to set
		 * the "trip during interrupt" field of the quick consumer
		 * trip registers -- confirm against the register layout.
		 */
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			(1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	/* (Re)negotiate the link. */
	bce_ifmedia_upd(ifp);

	/* Mark the interface up and able to accept transmits. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic driver tick (stats/link maintenance). */
	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);

bce_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4573 
4574 
4575 /****************************************************************************/
4576 /* Handles controller initialization when called from an unlocked routine.  */
4577 /*                                                                          */
4578 /* Returns:                                                                 */
4579 /*   Nothing.                                                               */
4580 /****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc;

	sc = xsc;

	/* Serialize hardware access with the softc lock. */
	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);
}
4590 
4591 
4592 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4594 /* memory visible to the controller.                                        */
4595 /*                                                                          */
4596 /* Returns:                                                                 */
4597 /*   0 for success, positive value for failure.                             */
4598 /****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf *m_head, u16 *prod,
	u16 *chain_prod, u32 *prod_bseq)
{
	u32 vlan_tag_flags = 0;
	struct m_tag *mtag;
	struct bce_dmamap_arg map_arg;
	bus_dmamap_t map;
	int i, error, rc = 0;

	/* Transfer any checksum offload flags to the bd. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	mtag = VLAN_OUTPUT_TAG(sc->bce_ifp, m_head);
	if (mtag != NULL)
		vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
			(VLAN_TAG_VALUE(mtag) << 16));

	/* Map the mbuf into DMAable memory. */
	map = sc->tx_mbuf_map[*chain_prod];
	map_arg.sc         = sc;
	map_arg.prod       = *prod;
	map_arg.chain_prod = *chain_prod;
	map_arg.prod_bseq  = *prod_bseq;
	map_arg.tx_flags   = vlan_tag_flags;
	/* Cap the segment count so the ring is never filled completely. */
	map_arg.maxsegs    = USABLE_TX_BD - sc->used_tx_bd -
		BCE_TX_SLACK_SPACE;

	KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));

	for (i = 0; i < TX_PAGES; i++)
		map_arg.tx_chain[i] = sc->tx_bd_chain[i];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m_head,
	    bce_dma_map_tx_desc, &map_arg, BUS_DMA_NOWAIT);

	if (error || map_arg.maxsegs == 0) {

            /* Try to defrag the mbuf if there are too many segments. */
            if (error == EFBIG && map_arg.maxsegs != 0) {
                struct mbuf *m0;

	        DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
                    __FUNCTION__, map_arg.maxsegs);

                /*
                 * NOTE(review): m_defrag() may return a new mbuf chain and
                 * free the old one; the caller's copy of m_head is never
                 * updated (this function takes the pointer by value), so
                 * the caller may requeue a stale pointer on the error
                 * path -- confirm against bce_start_locked().
                 */
                m0 = m_defrag(m_head, M_DONTWAIT);
                if (m0 != NULL) {
                    m_head = m0;
                    error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag,
                        map, m_head, bce_dma_map_tx_desc, &map_arg,
                        BUS_DMA_NOWAIT);
                }
            }

            /* Still getting an error after a defrag. */
            if (error) {
                BCE_PRINTF(sc,
                    "%s(%d): Error mapping mbuf into TX chain!\n",
                    __FILE__, __LINE__);
                rc = ENOBUFS;
                goto bce_tx_encap_exit;
            }

	}

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * delete the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_map[*chain_prod] =
		sc->tx_mbuf_map[map_arg.chain_prod];
	sc->tx_mbuf_map[map_arg.chain_prod] = map;
	sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
	sc->used_tx_bd += map_arg.maxsegs;

	/* Track the highest ring occupancy seen (debug builds only). */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);

	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, *chain_prod,
		map_arg.maxsegs));

	/* prod still points the last used tx_bd at this point. */
	*prod       = map_arg.prod;
	*chain_prod = map_arg.chain_prod;
	*prod_bseq  = map_arg.prod_bseq;

bce_tx_encap_exit:

	return(rc);
}
4703 
4704 
4705 /****************************************************************************/
4706 /* Main transmit routine when called from another routine with a lock.      */
4707 /*                                                                          */
4708 /* Returns:                                                                 */
4709 /*   Nothing.                                                               */
4710 /****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;
	u32	tx_prod_bseq;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
	tx_prod_bseq = sc->tx_prod_bseq;

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring: a non-NULL
	 * tx_mbuf_ptr entry at the producer index means that slot has not
	 * yet been reclaimed by bce_tx_intr().
	 */
	while(sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 *
		 * NOTE(review): bce_tx_encap() may internally m_defrag()
		 * the chain, replacing the mbuf; this local m_head is not
		 * updated, so the PREPEND here and the BPF_MTAP below may
		 * reference a stale pointer in that case -- confirm.
		 */
		if (bce_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
				"TX chain is closed for business! Total tx_bd used = %d\n",
				sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		BPF_MTAP(ifp, m_head);

		/* prod points at the last used tx_bd; advance to the next free one. */
		tx_prod = NEXT_TX_BD(tx_prod);
		tx_chain_prod = TX_CHAIN_IDX(tx_prod);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
			__FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* Update the driver's counters. */
	sc->tx_prod      = tx_prod;
	sc->tx_prod_bseq = tx_prod_bseq;

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);

	/* Start the transmit by writing the new producer values to the chip. */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BCE_TX_TIMEOUT;

bce_start_locked_exit:
	return;
}
4795 
4796 
4797 /****************************************************************************/
4798 /* Main transmit routine when called from another routine without a lock.   */
4799 /*                                                                          */
4800 /* Returns:                                                                 */
4801 /*   Nothing.                                                               */
4802 /****************************************************************************/
4803 static void
4804 bce_start(struct ifnet *ifp)
4805 {
4806 	struct bce_softc *sc = ifp->if_softc;
4807 
4808 	BCE_LOCK(sc);
4809 	bce_start_locked(ifp);
4810 	BCE_UNLOCK(sc);
4811 }
4812 
4813 
4814 /****************************************************************************/
4815 /* Handles any IOCTL calls from the operating system.                       */
4816 /*                                                                          */
4817 /* Returns:                                                                 */
4818 /*   0 for success, positive value for failure.                             */
4819 /****************************************************************************/
4820 static int
4821 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4822 {
4823 	struct bce_softc *sc = ifp->if_softc;
4824 	struct ifreq *ifr = (struct ifreq *) data;
4825 	struct mii_data *mii;
4826 	int mask, error = 0;
4827 
4828 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4829 
4830 	switch(command) {
4831 
4832 		/* Set the MTU. */
4833 		case SIOCSIFMTU:
4834 			/* Check that the MTU setting is supported. */
4835 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
4836 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4837 				error = EINVAL;
4838 				break;
4839 			}
4840 
4841 			DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4842 
4843 			ifp->if_mtu = ifr->ifr_mtu;
4844 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4845 			bce_init(sc);
4846 			break;
4847 
4848 		/* Set interface. */
4849 		case SIOCSIFFLAGS:
4850 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4851 
4852 			BCE_LOCK(sc);
4853 
4854 			/* Check if the interface is up. */
4855 			if (ifp->if_flags & IFF_UP) {
4856 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4857 					/* Change the promiscuous/multicast flags as necessary. */
4858 					bce_set_rx_mode(sc);
4859 				} else {
4860 					/* Start the HW */
4861 					bce_init_locked(sc);
4862 				}
4863 			} else {
4864 				/* The interface is down.  Check if the driver is running. */
4865 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4866 					bce_stop(sc);
4867 				}
4868 			}
4869 
4870 			BCE_UNLOCK(sc);
4871 			error = 0;
4872 
4873 			break;
4874 
4875 		/* Add/Delete multicast address */
4876 		case SIOCADDMULTI:
4877 		case SIOCDELMULTI:
4878 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4879 
4880 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4881 				BCE_LOCK(sc);
4882 				bce_set_rx_mode(sc);
4883 				BCE_UNLOCK(sc);
4884 				error = 0;
4885 			}
4886 
4887 			break;
4888 
4889 		/* Set/Get Interface media */
4890 		case SIOCSIFMEDIA:
4891 		case SIOCGIFMEDIA:
4892 			DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4893 
4894 			DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4895 				sc->bce_phy_flags);
4896 
4897 			if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
4898 				DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");
4899 
4900 				error = ifmedia_ioctl(ifp, ifr,
4901 				    &sc->bce_ifmedia, command);
4902 			} else {
4903 				DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4904 				mii = device_get_softc(sc->bce_miibus);
4905 				error = ifmedia_ioctl(ifp, ifr,
4906 				    &mii->mii_media, command);
4907 			}
4908 			break;
4909 
4910 		/* Set interface capability */
4911 		case SIOCSIFCAP:
4912 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4913 			DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4914 
4915 #ifdef DEVICE_POLLING
4916 			if (mask & IFCAP_POLLING) {
4917 				if (ifr->ifr_reqcap & IFCAP_POLLING) {
4918 
4919 					/* Setup the poll routine to call. */
4920 					error = ether_poll_register(bce_poll, ifp);
4921 					if (error) {
4922 						BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4923 							__FILE__, __LINE__);
4924 						goto bce_ioctl_exit;
4925 					}
4926 
4927 					/* Clear the interrupt. */
4928 					BCE_LOCK(sc);
4929 					bce_disable_intr(sc);
4930 
4931 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4932 						(1 << 16) | sc->bce_rx_quick_cons_trip);
4933 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4934 						(1 << 16) | sc->bce_tx_quick_cons_trip);
4935 
4936 					ifp->if_capenable |= IFCAP_POLLING;
4937 					BCE_UNLOCK(sc);
4938 				} else {
4939 					/* Clear the poll routine. */
4940 					error = ether_poll_deregister(ifp);
4941 
4942 					/* Enable interrupt even in error case */
4943 					BCE_LOCK(sc);
4944 					bce_enable_intr(sc);
4945 
4946 					REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4947 						(sc->bce_tx_quick_cons_trip_int << 16) |
4948 						sc->bce_tx_quick_cons_trip);
4949 					REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4950 						(sc->bce_rx_quick_cons_trip_int << 16) |
4951 						sc->bce_rx_quick_cons_trip);
4952 
4953 					ifp->if_capenable &= ~IFCAP_POLLING;
4954 					BCE_UNLOCK(sc);
4955 				}
4956 			}
4957 #endif /*DEVICE_POLLING */
4958 
4959 			/* Toggle the TX checksum capabilites enable flag. */
4960 			if (mask & IFCAP_TXCSUM) {
4961 				ifp->if_capenable ^= IFCAP_TXCSUM;
4962 				if (IFCAP_TXCSUM & ifp->if_capenable)
4963 					ifp->if_hwassist = BCE_IF_HWASSIST;
4964 				else
4965 					ifp->if_hwassist = 0;
4966 			}
4967 
4968 			/* Toggle the RX checksum capabilities enable flag. */
4969 			if (mask & IFCAP_RXCSUM) {
4970 				ifp->if_capenable ^= IFCAP_RXCSUM;
4971 				if (IFCAP_RXCSUM & ifp->if_capenable)
4972 					ifp->if_hwassist = BCE_IF_HWASSIST;
4973 				else
4974 					ifp->if_hwassist = 0;
4975 			}
4976 
4977 			/* Toggle VLAN_MTU capabilities enable flag. */
4978 			if (mask & IFCAP_VLAN_MTU) {
4979 				BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
4980 					__FILE__, __LINE__);
4981 			}
4982 
4983 			/* Toggle VLANHWTAG capabilities enabled flag. */
4984 			if (mask & IFCAP_VLAN_HWTAGGING) {
4985 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
4986 					BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
4987 						"management firmware (ASF/IPMI/UMP) is running!\n",
4988 						__FILE__, __LINE__);
4989 				else
4990 					BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
4991 						__FILE__, __LINE__);
4992 			}
4993 
4994 			break;
4995 		default:
4996 			DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
4997 				(u32) command);
4998 
4999 			/* We don't know how to handle the IOCTL, pass it on. */
5000 			error = ether_ioctl(ifp, command, data);
5001 			break;
5002 	}
5003 
5004 #ifdef DEVICE_POLLING
5005 bce_ioctl_exit:
5006 #endif
5007 
5008 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5009 
5010 	return(error);
5011 }
5012 
5013 
5014 /****************************************************************************/
5015 /* Transmit timeout handler.                                                */
5016 /*                                                                          */
5017 /* Returns:                                                                 */
5018 /*   Nothing.                                                               */
5019 /****************************************************************************/
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	/* Dump driver and status block state (debug builds) for post-mortem. */
	DBRUN(BCE_WARN_SEND,
		bce_dump_driver_state(sc);
		bce_dump_status_block(sc));

	BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n",
		__FILE__, __LINE__);

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	/* Clear RUNNING so bce_init_locked() performs a full re-init. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init(sc);
	/* Count the timed-out frame as an output error. */
	ifp->if_oerrors++;

}
5040 
5041 
5042 #ifdef DEVICE_POLLING
static void
bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bce_softc *sc = ifp->if_softc;

	/* The caller (bce_poll()) must hold the softc lock. */
	BCE_LOCK_ASSERT(sc);

	/*
	 * NOTE(review): "cmd" is ignored here, so POLL_AND_CHECK_STATUS
	 * gets no special treatment (e.g. link/attention processing) --
	 * confirm whether PHY events are handled elsewhere while polling.
	 */
	sc->bce_rxcycles = count;

	/* Pick up the latest status block contents written by the chip. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/* Check for any completed RX frames. */
	if (sc->status_block->status_rx_quick_consumer_index0 !=
		sc->hw_rx_cons)
		bce_rx_intr(sc);

	/* Check for any completed TX frames. */
	if (sc->status_block->status_tx_quick_consumer_index0 !=
		sc->hw_tx_cons)
		bce_tx_intr(sc);

	/* Check for new frames to transmit. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

}
5070 
5071 
5072 static void
5073 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5074 {
5075 	struct bce_softc *sc = ifp->if_softc;
5076 
5077 	BCE_LOCK(sc);
5078 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5079 		bce_poll_locked(ifp, cmd, count);
5080 	BCE_UNLOCK(sc);
5081 }
5082 #endif /* DEVICE_POLLING */
5083 
5084 
#if 0
/*
 * Returns 1 if the controller has posted new RX/TX completions, or if
 * the link state reported by the status block disagrees with the
 * driver's cached link state; 0 otherwise.  Currently compiled out.
 */
static inline int
bce_has_work(struct bce_softc *sc)
{
	struct status_block *stat = sc->status_block;

	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
		return 1;

	/*
	 * Bug fix: the original referenced "bp->link_up", a leftover from
	 * the Linux bnx2 driver; "bp" does not exist in this driver, so the
	 * code could never compile if enabled.  Use the softc's cached
	 * link state instead.
	 */
	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    sc->bce_link)
		return 1;

	return 0;
}
#endif
5102 
5103 
5104 /*
5105  * Interrupt handler.
5106  */
5107 /****************************************************************************/
5108 /* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
5110 /* interrupt causes (PHY, TX, RX).                                          */
5111 /*                                                                          */
5112 /* Returns:                                                                 */
/*   Nothing.                                                               */
5114 /****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;

	sc = xsc;
	ifp = sc->bce_ifp;

	BCE_LOCK(sc);

	DBRUNIF(1, sc->interrupts_generated++);

#ifdef DEVICE_POLLING
	/* In polling mode the rings are serviced from bce_poll(). */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
		goto bce_intr_exit;
	}
#endif

	/* Pick up the latest status block contents written by the chip. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF(sc, "Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(1, sc->unexpected_attentions++);

			BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUN(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Attempt to recover by reinitializing the controller. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
	    	(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag,	sc->status_map,
	    BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}
5225 
5226 
5227 /****************************************************************************/
5228 /* Programs the various packet receive modes (broadcast and multicast).     */
5229 /*                                                                          */
5230 /* Returns:                                                                 */
5231 /*   Nothing.                                                               */
5232 /****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[4] = { 0, 0, 0, 0 };	/* 4 x 32-bit multicast hash registers */
	u32 rx_mode, sort_mode;
	int h, i;

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled, so only keep the VLAN tag when the interface
	 * does not advertise hardware tagging AND no management
	 * firmware is running.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses by setting every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
       	}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * 7-bit hash: bits 6:5 select one of the 4 hash
			 * registers, bits 4:0 select the bit within it.
			 */
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    	ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
			hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < 4; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}
5311 
5312 
5313 /****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
5316 /*                                                                          */
5317 /* Returns:                                                                 */
5318 /*   Nothing.                                                               */
5319 /****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	/* NOTE(review): presumably the controller DMAs this block into host
	 * memory and the fields are host-readable here without a sync; the
	 * sync discipline is not visible in this function — confirm at caller. */
	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.  These are absolute counter snapshots,
	 * not deltas: the ifnet counters are overwritten each tick.
	 */
	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
				      (u_long) stats->stat_EtherStatsOverrsizePkts +
					  (u_long) stats->stat_IfInMBUFDiscards +
					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
					  (u_long) stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
					  (u_long) stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 * Skip the counter entirely on 5706 and 5708 A0 parts.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  64-bit counters are split by the
	 * hardware into _hi/_lo 32-bit halves and recombined here.
	 */
	sc->stat_IfHCInOctets =
		((u64) stats->stat_IfHCInOctets_hi << 32) +
		 (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((u64) stats->stat_IfHCOutOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
		 (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining hardware counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5535 
5536 
5537 static void
5538 bce_tick_locked(struct bce_softc *sc)
5539 {
5540 	struct mii_data *mii = NULL;
5541 	struct ifnet *ifp;
5542 	u32 msg;
5543 
5544 	ifp = sc->bce_ifp;
5545 
5546 	BCE_LOCK_ASSERT(sc);
5547 
5548 	/* Tell the firmware that the driver is still running. */
5549 #ifdef BCE_DEBUG
5550 	msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5551 #else
5552 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5553 #endif
5554 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5555 
5556 	/* Update the statistics from the hardware statistics block. */
5557 	bce_stats_update(sc);
5558 
5559 	/* Schedule the next tick. */
5560 	callout_reset(
5561 		&sc->bce_stat_ch,		/* callout */
5562 		hz, 					/* ticks */
5563 		bce_tick, 				/* function */
5564 		sc);					/* function argument */
5565 
5566 	/* If link is up already up then we're done. */
5567 	if (sc->bce_link)
5568 		goto bce_tick_locked_exit;
5569 
5570 	/* DRC - ToDo: Add SerDes support and check SerDes link here. */
5571 
5572 	mii = device_get_softc(sc->bce_miibus);
5573 	mii_tick(mii);
5574 
5575 	/* Check if the link has come up. */
5576 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5577 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5578 		sc->bce_link++;
5579 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5580 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5581 		    bootverbose)
5582 			BCE_PRINTF(sc, "Gigabit link up\n");
5583 		/* Now that link is up, handle any outstanding TX traffic. */
5584 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5585 			bce_start_locked(ifp);
5586 	}
5587 
5588 bce_tick_locked_exit:
5589 	return;
5590 }
5591 
5592 
/*
 * Callout entry point: acquire the softc lock and run the locked tick.
 */
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;

	BCE_LOCK(sc);
	bce_tick_locked(sc);
	BCE_UNLOCK(sc);
}
5604 
5605 
5606 #ifdef BCE_DEBUG
5607 /****************************************************************************/
5608 /* Allows the driver state to be dumped through the sysctl interface.       */
5609 /*                                                                          */
5610 /* Returns:                                                                 */
5611 /*   0 for success, positive value for failure.                             */
5612 /****************************************************************************/
5613 static int
5614 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5615 {
5616         int error;
5617         int result;
5618         struct bce_softc *sc;
5619 
5620         result = -1;
5621         error = sysctl_handle_int(oidp, &result, 0, req);
5622 
5623         if (error || !req->newptr)
5624                 return (error);
5625 
5626         if (result == 1) {
5627                 sc = (struct bce_softc *)arg1;
5628                 bce_dump_driver_state(sc);
5629         }
5630 
5631         return error;
5632 }
5633 
5634 
5635 /****************************************************************************/
5636 /* Allows the hardware state to be dumped through the sysctl interface.     */
5637 /*                                                                          */
5638 /* Returns:                                                                 */
5639 /*   0 for success, positive value for failure.                             */
5640 /****************************************************************************/
5641 static int
5642 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5643 {
5644         int error;
5645         int result;
5646         struct bce_softc *sc;
5647 
5648         result = -1;
5649         error = sysctl_handle_int(oidp, &result, 0, req);
5650 
5651         if (error || !req->newptr)
5652                 return (error);
5653 
5654         if (result == 1) {
5655                 sc = (struct bce_softc *)arg1;
5656                 bce_dump_hw_state(sc);
5657         }
5658 
5659         return error;
5660 }
5661 
5662 
5663 /****************************************************************************/
5664 /*                                                                          */
5665 /*                                                                          */
5666 /* Returns:                                                                 */
5667 /*   0 for success, positive value for failure.                             */
5668 /****************************************************************************/
5669 static int
5670 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5671 {
5672         int error;
5673         int result;
5674         struct bce_softc *sc;
5675 
5676         result = -1;
5677         error = sysctl_handle_int(oidp, &result, 0, req);
5678 
5679         if (error || !req->newptr)
5680                 return (error);
5681 
5682         if (result == 1) {
5683                 sc = (struct bce_softc *)arg1;
5684                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5685         }
5686 
5687         return error;
5688 }
5689 
5690 
5691 /****************************************************************************/
5692 /*                                                                          */
5693 /*                                                                          */
5694 /* Returns:                                                                 */
5695 /*   0 for success, positive value for failure.                             */
5696 /****************************************************************************/
5697 static int
5698 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5699 {
5700         int error;
5701         int result;
5702         struct bce_softc *sc;
5703 
5704         result = -1;
5705         error = sysctl_handle_int(oidp, &result, 0, req);
5706 
5707         if (error || !req->newptr)
5708                 return (error);
5709 
5710         if (result == 1) {
5711                 sc = (struct bce_softc *)arg1;
5712                 bce_breakpoint(sc);
5713         }
5714 
5715         return error;
5716 }
5717 #endif
5718 
5719 
5720 /****************************************************************************/
5721 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5722 /*                                                                          */
5723 /* Returns:                                                                 */
5724 /*   0 for success, positive value for failure.                             */
5725 /****************************************************************************/
5726 static void
5727 bce_add_sysctls(struct bce_softc *sc)
5728 {
5729 	struct sysctl_ctx_list *ctx;
5730 	struct sysctl_oid_list *children;
5731 
5732 	ctx = device_get_sysctl_ctx(sc->bce_dev);
5733 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5734 
5735 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5736 		"driver_version",
5737 		CTLFLAG_RD, &bce_driver_version,
5738 		0, "bce driver version");
5739 
5740 #ifdef BCE_DEBUG
5741 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5742 		"rx_low_watermark",
5743 		CTLFLAG_RD, &sc->rx_low_watermark,
5744 		0, "Lowest level of free rx_bd's");
5745 
5746 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5747 		"tx_hi_watermark",
5748 		CTLFLAG_RD, &sc->tx_hi_watermark,
5749 		0, "Highest level of used tx_bd's");
5750 
5751 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5752 		"l2fhdr_status_errors",
5753 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5754 		0, "l2_fhdr status errors");
5755 
5756 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5757 		"unexpected_attentions",
5758 		CTLFLAG_RD, &sc->unexpected_attentions,
5759 		0, "unexpected attentions");
5760 
5761 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5762 		"lost_status_block_updates",
5763 		CTLFLAG_RD, &sc->lost_status_block_updates,
5764 		0, "lost status block updates");
5765 
5766 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5767 		"mbuf_alloc_failed",
5768 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5769 		0, "mbuf cluster allocation failures");
5770 #endif
5771 
5772 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5773 		"stat_IfHcInOctets",
5774 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5775 		"Bytes received");
5776 
5777 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5778 		"stat_IfHCInBadOctets",
5779 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5780 		"Bad bytes received");
5781 
5782 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5783 		"stat_IfHCOutOctets",
5784 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5785 		"Bytes sent");
5786 
5787 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5788 		"stat_IfHCOutBadOctets",
5789 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5790 		"Bad bytes sent");
5791 
5792 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5793 		"stat_IfHCInUcastPkts",
5794 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5795 		"Unicast packets received");
5796 
5797 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5798 		"stat_IfHCInMulticastPkts",
5799 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5800 		"Multicast packets received");
5801 
5802 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5803 		"stat_IfHCInBroadcastPkts",
5804 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5805 		"Broadcast packets received");
5806 
5807 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5808 		"stat_IfHCOutUcastPkts",
5809 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5810 		"Unicast packets sent");
5811 
5812 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5813 		"stat_IfHCOutMulticastPkts",
5814 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5815 		"Multicast packets sent");
5816 
5817 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5818 		"stat_IfHCOutBroadcastPkts",
5819 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5820 		"Broadcast packets sent");
5821 
5822 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5823 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5824 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5825 		0, "Internal MAC transmit errors");
5826 
5827 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5828 		"stat_Dot3StatsCarrierSenseErrors",
5829 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5830 		0, "Carrier sense errors");
5831 
5832 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5833 		"stat_Dot3StatsFCSErrors",
5834 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5835 		0, "Frame check sequence errors");
5836 
5837 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5838 		"stat_Dot3StatsAlignmentErrors",
5839 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5840 		0, "Alignment errors");
5841 
5842 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5843 		"stat_Dot3StatsSingleCollisionFrames",
5844 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5845 		0, "Single Collision Frames");
5846 
5847 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5848 		"stat_Dot3StatsMultipleCollisionFrames",
5849 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5850 		0, "Multiple Collision Frames");
5851 
5852 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5853 		"stat_Dot3StatsDeferredTransmissions",
5854 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5855 		0, "Deferred Transmissions");
5856 
5857 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5858 		"stat_Dot3StatsExcessiveCollisions",
5859 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5860 		0, "Excessive Collisions");
5861 
5862 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5863 		"stat_Dot3StatsLateCollisions",
5864 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5865 		0, "Late Collisions");
5866 
5867 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5868 		"stat_EtherStatsCollisions",
5869 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5870 		0, "Collisions");
5871 
5872 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5873 		"stat_EtherStatsFragments",
5874 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5875 		0, "Fragments");
5876 
5877 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5878 		"stat_EtherStatsJabbers",
5879 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5880 		0, "Jabbers");
5881 
5882 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5883 		"stat_EtherStatsUndersizePkts",
5884 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5885 		0, "Undersize packets");
5886 
5887 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5888 		"stat_EtherStatsOverrsizePkts",
5889 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5890 		0, "stat_EtherStatsOverrsizePkts");
5891 
5892 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5893 		"stat_EtherStatsPktsRx64Octets",
5894 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5895 		0, "Bytes received in 64 byte packets");
5896 
5897 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5898 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5899 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5900 		0, "Bytes received in 65 to 127 byte packets");
5901 
5902 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5903 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5904 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5905 		0, "Bytes received in 128 to 255 byte packets");
5906 
5907 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5908 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5909 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5910 		0, "Bytes received in 256 to 511 byte packets");
5911 
5912 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5913 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5914 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5915 		0, "Bytes received in 512 to 1023 byte packets");
5916 
5917 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5918 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5919 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5920 		0, "Bytes received in 1024 t0 1522 byte packets");
5921 
5922 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5923 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5924 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5925 		0, "Bytes received in 1523 to 9022 byte packets");
5926 
5927 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5928 		"stat_EtherStatsPktsTx64Octets",
5929 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5930 		0, "Bytes sent in 64 byte packets");
5931 
5932 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5933 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5934 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5935 		0, "Bytes sent in 65 to 127 byte packets");
5936 
5937 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5938 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5939 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5940 		0, "Bytes sent in 128 to 255 byte packets");
5941 
5942 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5943 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5944 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5945 		0, "Bytes sent in 256 to 511 byte packets");
5946 
5947 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5948 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5949 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5950 		0, "Bytes sent in 512 to 1023 byte packets");
5951 
5952 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5953 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5954 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5955 		0, "Bytes sent in 1024 to 1522 byte packets");
5956 
5957 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5958 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5959 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5960 		0, "Bytes sent in 1523 to 9022 byte packets");
5961 
5962 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5963 		"stat_XonPauseFramesReceived",
5964 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5965 		0, "XON pause frames receved");
5966 
5967 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5968 		"stat_XoffPauseFramesReceived",
5969 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5970 		0, "XOFF pause frames received");
5971 
5972 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5973 		"stat_OutXonSent",
5974 		CTLFLAG_RD, &sc->stat_OutXonSent,
5975 		0, "XON pause frames sent");
5976 
5977 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5978 		"stat_OutXoffSent",
5979 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5980 		0, "XOFF pause frames sent");
5981 
5982 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5983 		"stat_FlowControlDone",
5984 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5985 		0, "Flow control done");
5986 
5987 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5988 		"stat_MacControlFramesReceived",
5989 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5990 		0, "MAC control frames received");
5991 
5992 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5993 		"stat_XoffStateEntered",
5994 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5995 		0, "XOFF state entered");
5996 
5997 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5998 		"stat_IfInFramesL2FilterDiscards",
5999 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6000 		0, "Received L2 packets discarded");
6001 
6002 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6003 		"stat_IfInRuleCheckerDiscards",
6004 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6005 		0, "Received packets discarded by rule");
6006 
6007 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6008 		"stat_IfInFTQDiscards",
6009 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6010 		0, "Received packet FTQ discards");
6011 
6012 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6013 		"stat_IfInMBUFDiscards",
6014 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6015 		0, "Received packets discarded due to lack of controller buffer memory");
6016 
6017 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6018 		"stat_IfInRuleCheckerP4Hit",
6019 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6020 		0, "Received packets rule checker hits");
6021 
6022 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6023 		"stat_CatchupInRuleCheckerDiscards",
6024 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6025 		0, "Received packets discarded in Catchup path");
6026 
6027 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6028 		"stat_CatchupInFTQDiscards",
6029 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6030 		0, "Received packets discarded in FTQ in Catchup path");
6031 
6032 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6033 		"stat_CatchupInMBUFDiscards",
6034 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6035 		0, "Received packets discarded in controller buffer memory in Catchup path");
6036 
6037 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6038 		"stat_CatchupInRuleCheckerP4Hit",
6039 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6040 		0, "Received packets rule checker hits in Catchup path");
6041 
6042 #ifdef BCE_DEBUG
6043 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6044 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6045 		(void *)sc, 0,
6046 		bce_sysctl_driver_state, "I", "Drive state information");
6047 
6048 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6049 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
6050 		(void *)sc, 0,
6051 		bce_sysctl_hw_state, "I", "Hardware state information");
6052 
6053 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6054 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6055 		(void *)sc, 0,
6056 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6057 
6058 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6059 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6060 		(void *)sc, 0,
6061 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
6062 #endif
6063 
6064 }
6065 
6066 
6067 /****************************************************************************/
6068 /* BCE Debug Routines                                                       */
6069 /****************************************************************************/
6070 #ifdef BCE_DEBUG
6071 
6072 /****************************************************************************/
6073 /* Prints out information about an mbuf.                                    */
6074 /*                                                                          */
6075 /* Returns:                                                                 */
6076 /*   Nothing.                                                               */
6077 /****************************************************************************/
6078 static void
6079 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6080 {
6081 	u32 val_hi, val_lo;
6082 	struct mbuf *mp = m;
6083 
6084 	if (m == NULL) {
6085 		/* Index out of range. */
6086 		printf("mbuf ptr is null!\n");
6087 		return;
6088 	}
6089 
6090 	while (mp) {
6091 		val_hi = BCE_ADDR_HI(mp);
6092 		val_lo = BCE_ADDR_LO(mp);
6093 		BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
6094 			   val_hi, val_lo, mp->m_len);
6095 
6096 		if (mp->m_flags & M_EXT)
6097 			printf("M_EXT ");
6098 		if (mp->m_flags & M_PKTHDR)
6099 			printf("M_PKTHDR ");
6100 		printf("\n");
6101 
6102 		if (mp->m_flags & M_EXT) {
6103 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6104 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6105 			BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
6106 				val_hi, val_lo, mp->m_ext.ext_size);
6107 		}
6108 
6109 		mp = mp->m_next;
6110 	}
6111 
6112 
6113 }
6114 
6115 
6116 /****************************************************************************/
6117 /* Prints out the mbufs in the TX mbuf chain.                               */
6118 /*                                                                          */
6119 /* Returns:                                                                 */
6120 /*   Nothing.                                                               */
6121 /****************************************************************************/
6122 static void
6123 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6124 {
6125 	struct mbuf *m;
6126 
6127 	BCE_PRINTF(sc,
6128 		"----------------------------"
6129 		"  tx mbuf data  "
6130 		"----------------------------\n");
6131 
6132 	for (int i = 0; i < count; i++) {
6133 	 	m = sc->tx_mbuf_ptr[chain_prod];
6134 		BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6135 		bce_dump_mbuf(sc, m);
6136 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6137 	}
6138 
6139 	BCE_PRINTF(sc,
6140 		"----------------------------"
6141 		"----------------"
6142 		"----------------------------\n");
6143 }
6144 
6145 
6146 /*
6147  * This routine prints the RX mbuf chain.
6148  */
6149 static void
6150 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6151 {
6152 	struct mbuf *m;
6153 
6154 	BCE_PRINTF(sc,
6155 		"----------------------------"
6156 		"  rx mbuf data  "
6157 		"----------------------------\n");
6158 
6159 	for (int i = 0; i < count; i++) {
6160 	 	m = sc->rx_mbuf_ptr[chain_prod];
6161 		BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6162 		bce_dump_mbuf(sc, m);
6163 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6164 	}
6165 
6166 
6167 	BCE_PRINTF(sc,
6168 		"----------------------------"
6169 		"----------------"
6170 		"----------------------------\n");
6171 }
6172 
6173 
6174 static void
6175 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6176 {
6177 	if (idx > MAX_TX_BD)
6178 		/* Index out of range. */
6179 		BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6180 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6181 		/* TX Chain page pointer. */
6182 		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6183 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6184 	else
6185 		/* Normal tx_bd entry. */
6186 		BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6187 			"flags = 0x%08X\n", idx,
6188 			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6189 			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
6190 }
6191 
6192 
/* Prints a single rx_bd entry (index check, page pointer, or data entry). */
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer (last entry on each page). */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
6210 
6211 
6212 static void
6213 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6214 {
6215 	BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6216 		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6217 		"tcp_udp_xsum = 0x%04X\n", idx,
6218 		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6219 		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6220 		l2fhdr->l2_fhdr_tcp_udp_xsum);
6221 }
6222 
6223 
6224 /*
6225  * This routine prints the TX chain.
6226  */
6227 static void
6228 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6229 {
6230 	struct tx_bd *txbd;
6231 
6232 	/* First some info about the tx_bd chain structure. */
6233 	BCE_PRINTF(sc,
6234 		"----------------------------"
6235 		"  tx_bd  chain  "
6236 		"----------------------------\n");
6237 
6238 	BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6239 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6240 
6241 	BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6242 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6243 
6244 	BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6245 
6246 	BCE_PRINTF(sc, ""
6247 		"-----------------------------"
6248 		"   tx_bd data   "
6249 		"-----------------------------\n");
6250 
6251 	/* Now print out the tx_bd's themselves. */
6252 	for (int i = 0; i < count; i++) {
6253 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6254 		bce_dump_txbd(sc, tx_prod, txbd);
6255 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6256 	}
6257 
6258 	BCE_PRINTF(sc,
6259 		"-----------------------------"
6260 		"--------------"
6261 		"-----------------------------\n");
6262 }
6263 
6264 
6265 /*
6266  * This routine prints the RX chain.
6267  */
6268 static void
6269 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6270 {
6271 	struct rx_bd *rxbd;
6272 
6273 	/* First some info about the tx_bd chain structure. */
6274 	BCE_PRINTF(sc,
6275 		"----------------------------"
6276 		"  rx_bd  chain  "
6277 		"----------------------------\n");
6278 
6279 	BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6280 
6281 	BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6282 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6283 
6284 	BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6285 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6286 
6287 	BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6288 
6289 	BCE_PRINTF(sc,
6290 		"----------------------------"
6291 		"   rx_bd data   "
6292 		"----------------------------\n");
6293 
6294 	/* Now print out the rx_bd's themselves. */
6295 	for (int i = 0; i < count; i++) {
6296 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6297 		bce_dump_rxbd(sc, rx_prod, rxbd);
6298 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6299 	}
6300 
6301 	BCE_PRINTF(sc,
6302 		"----------------------------"
6303 		"--------------"
6304 		"----------------------------\n");
6305 }
6306 
6307 
6308 /*
6309  * This routine prints the status block.
6310  */
static void
bce_dump_status_block(struct bce_softc *sc)
{
	struct status_block *sblk;

	sblk = sc->status_block;

   	BCE_PRINTF(sc, "----------------------------- Status Block "
		"-----------------------------\n");

	BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
		sblk->status_attn_bits, sblk->status_attn_bits_ack,
		sblk->status_idx);

	BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
		sblk->status_rx_quick_consumer_index0,
		sblk->status_tx_quick_consumer_index0);

	/* NOTE(review): status_idx was already printed above as "index" --
	 * this second print looks redundant; confirm before removing. */
	BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index1,
			sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index2,
			sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index3,
			sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index4,
			sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index6,
			sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
			sblk->status_rx_quick_consumer_index8,
			sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
			sblk->status_rx_quick_consumer_index10,
			sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
			sblk->status_rx_quick_consumer_index12,
			sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
			sblk->status_rx_quick_consumer_index14,
			sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
			sblk->status_completion_producer_index,
			sblk->status_cmd_consumer_index);

	BCE_PRINTF(sc, "-------------------------------------------"
		"-----------------------------\n");
}
6395 
6396 
6397 /*
6398  * This routine prints the statistics block.
6399  */
static void
bce_dump_stats_block(struct bce_softc *sc)
{
	struct statistics_block *sblk;

	sblk = sc->stats_block;

	BCE_PRINTF(sc, ""
		"-----------------------------"
		" Stats  Block "
		"-----------------------------\n");

	/* The 64-bit octet/packet counters are always printed. */
	BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
		"IfHcInBadOctets      = 0x%08X:%08X\n",
		sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
		sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
		"IfHcOutBadOctets     = 0x%08X:%08X\n",
		sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
		sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
		"IfHcInMulticastPkts  = 0x%08X:%08X\n",
		sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
		sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);

	BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
		"IfHcOutUcastPkts     = 0x%08X:%08X\n",
		sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
		sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);

	BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
		sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
		sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);

	/* The remaining counters are only printed when non-zero. */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BCE_PRINTF(sc, "0x%08X : "
		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
			sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
			sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
			sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
			sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
			sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
			sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
			sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
			sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
			sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
			sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
			sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
			sblk->stat_EtherStatsUndersizePkts);

	/* "Overrsize" matches the (misspelled) hardware struct field name. */
	if (sblk->stat_EtherStatsOverrsizePkts)
		BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
			sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
			sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
			sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
			sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
	   BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
			sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
			sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
			sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
			sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
			sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
			sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
			sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
			sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
			sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
			sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
			sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
			sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
			sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
			sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
			sblk->stat_CatchupInRuleCheckerP4Hit);

	BCE_PRINTF(sc,
		"-----------------------------"
		"--------------"
		"-----------------------------\n");
}
6618 
6619 
6620 static void
6621 bce_dump_driver_state(struct bce_softc *sc)
6622 {
6623 	u32 val_hi, val_lo;
6624 
6625 	BCE_PRINTF(sc,
6626 		"-----------------------------"
6627 		" Driver State "
6628 		"-----------------------------\n");
6629 
6630 	val_hi = BCE_ADDR_HI(sc);
6631 	val_lo = BCE_ADDR_LO(sc);
6632 	BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6633 		val_hi, val_lo);
6634 
6635 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6636 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6637 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6638 		val_hi, val_lo);
6639 
6640 	val_hi = BCE_ADDR_HI(sc->status_block);
6641 	val_lo = BCE_ADDR_LO(sc->status_block);
6642 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6643 		val_hi, val_lo);
6644 
6645 	val_hi = BCE_ADDR_HI(sc->stats_block);
6646 	val_lo = BCE_ADDR_LO(sc->stats_block);
6647 	BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6648 		val_hi, val_lo);
6649 
6650 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6651 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6652 	BCE_PRINTF(sc,
6653 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
6654 		val_hi, val_lo);
6655 
6656 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6657 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6658 	BCE_PRINTF(sc,
6659 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6660 		val_hi, val_lo);
6661 
6662 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6663 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6664 	BCE_PRINTF(sc,
6665 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6666 		val_hi, val_lo);
6667 
6668 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6669 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6670 	BCE_PRINTF(sc,
6671 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6672 		val_hi, val_lo);
6673 
6674 	BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6675 		sc->interrupts_generated);
6676 
6677 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6678 		sc->rx_interrupts);
6679 
6680 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6681 		sc->tx_interrupts);
6682 
6683 	BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6684 		sc->last_status_idx);
6685 
6686 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6687 		sc->tx_prod);
6688 
6689 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6690 		sc->tx_cons);
6691 
6692 	BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6693 		sc->tx_prod_bseq);
6694 
6695 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6696 		sc->rx_prod);
6697 
6698 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6699 		sc->rx_cons);
6700 
6701 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6702 		sc->rx_prod_bseq);
6703 
6704 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6705 		sc->rx_mbuf_alloc);
6706 
6707 	BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6708 		sc->free_rx_bd);
6709 
6710 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6711 		sc->rx_low_watermark, (u32) USABLE_RX_BD);
6712 
6713 	BCE_PRINTF(sc, "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
6714 		sc->tx_mbuf_alloc);
6715 
6716 	BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6717 		sc->rx_mbuf_alloc);
6718 
6719 	BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6720 		sc->used_tx_bd);
6721 
6722 	BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6723 		sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6724 
6725 	BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6726 		sc->mbuf_alloc_failed);
6727 
6728 	BCE_PRINTF(sc,
6729 		"-----------------------------"
6730 		"--------------"
6731 		"-----------------------------\n");
6732 }
6733 
6734 
6735 static void
6736 bce_dump_hw_state(struct bce_softc *sc)
6737 {
6738 	u32 val1;
6739 
6740 	BCE_PRINTF(sc,
6741 		"----------------------------"
6742 		" Hardware State "
6743 		"----------------------------\n");
6744 
6745 	BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6746 
6747 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6748 	BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6749 		val1, BCE_MISC_ENABLE_STATUS_BITS);
6750 
6751 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6752 	BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6753 
6754 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6755 	BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6756 
6757 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6758 	BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6759 
6760 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6761 	BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6762 
6763 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6764 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6765 
6766 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6767 	BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6768 
6769 	val1 = REG_RD(sc, BCE_HC_STATUS);
6770 	BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6771 
6772 	BCE_PRINTF(sc,
6773 		"----------------------------"
6774 		"----------------"
6775 		"----------------------------\n");
6776 
6777 	BCE_PRINTF(sc,
6778 		"----------------------------"
6779 		" Register  Dump "
6780 		"----------------------------\n");
6781 
6782 	for (int i = 0x400; i < 0x8000; i += 0x10)
6783 		BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6784 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6785 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6786 
6787 	BCE_PRINTF(sc,
6788 		"----------------------------"
6789 		"----------------"
6790 		"----------------------------\n");
6791 }
6792 
6793 
6794 static void
6795 bce_breakpoint(struct bce_softc *sc)
6796 {
6797 
6798 	/* Unreachable code to shut the compiler up about unused functions. */
6799 	if (0) {
6800    		bce_dump_txbd(sc, 0, NULL);
6801 		bce_dump_rxbd(sc, 0, NULL);
6802 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6803 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6804 		bce_dump_l2fhdr(sc, 0, NULL);
6805 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6806 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6807 		bce_dump_status_block(sc);
6808 		bce_dump_stats_block(sc);
6809 		bce_dump_driver_state(sc);
6810 		bce_dump_hw_state(sc);
6811 	}
6812 
6813 	bce_dump_driver_state(sc);
6814 	/* Print the important status block fields. */
6815 	bce_dump_status_block(sc);
6816 
6817 	/* Call the debugger. */
6818 	breakpoint();
6819 
6820 	return;
6821 }
6822 #endif
6823