/*-
 * Copyright (c) 2006-2010 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, C0
 *   BCM5709S A1, C0
 *   BCM5716C C0
 *   BCM5716S C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1 (pre-production)
 *   BCM5706S A0, A1 (pre-production)
 *   BCM5708C A0, B0 (pre-production)
 *   BCM5708S A0, B0 (pre-production)
 *   BCM5709C A0  B0, B1, B2 (pre-production)
 *   BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include "opt_bce.h"

#include <dev/bce/if_bcereg.h>
#include <dev/bce/if_bcefw.h>

/****************************************************************************/
/* BCE Debug Options                                                        */
/****************************************************************************/
#ifdef BCE_DEBUG
/* Default debug message level (see if_bcereg.h for the BCE_* levels). */
u32 bce_debug = BCE_WARN;

/* Probability table for the fault-injection sim controls below:           */
/*          0 = Never                                                      */
/*          1 = 1 in 2,147,483,648                                         */
/*        256 = 1 in     8,388,608                                         */
/*       2048 = 1 in     1,048,576                                         */
/*      65536 = 1 in        32,768                                         */
/*    1048576 = 1 in         2,048                                         */
/*  268435456 = 1 in             8                                         */
/*  536870912 = 1 in             4                                         */
/* 1073741824 = 1 in             2                                         */

/* Controls how often the l2_fhdr frame error check will fail. */
int l2fhdr_error_sim_control = 0;

/* Controls how often the unexpected attention check will fail. */
int unexpected_attention_sim_control = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int mbuf_alloc_failed_sim_control = 0;

/* Controls how often to simulate a DMA mapping failure. */
int dma_map_addr_failed_sim_control = 0;

/* Controls how often to simulate a bootcode failure. */
int bootcode_running_failure_sim_control = 0;
#endif

/****************************************************************************/
/* BCE Build Time Options                                                   */
/****************************************************************************/
/* #define BCE_NVRAM_WRITE_SUPPORT 1 */


/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.    */
/****************************************************************************/
/* Maximum length of the probe-time device description string. */
#define BCE_DEVDESC_MAX		64

/*
 * Match table: { vendor, device, subvendor, subdevice, description }.
 * Specific OEM (subvendor/subdevice) entries precede the PCI_ANY_ID
 * catch-all for each controller so the OEM name is preferred.
 */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-SX" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	/* Table terminator. */
	{ 0, 0, 0, 0, NULL }
};


/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
/*
 * The first five words of each entry appear to be device strapping and
 * configuration register values (presumably strap mask, cfg1, write1,
 * etc. per the "need updates" notes below -- confirm against if_bcereg.h);
 * the remaining fields are flags, page geometry, address mask, total size,
 * and a human-readable name.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};


/****************************************************************************/
/* FreeBSD device entry points.
 */
/****************************************************************************/
static int  bce_probe			(device_t);
static int  bce_attach			(device_t);
static int  bce_detach			(device_t);
static int  bce_shutdown		(device_t);


/****************************************************************************/
/* BCE Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BCE_DEBUG
static u32  bce_reg_rd			(struct bce_softc *, u32);
static void bce_reg_wr			(struct bce_softc *, u32, u32);
static void bce_reg_wr16		(struct bce_softc *, u32, u16);
static u32  bce_ctx_rd			(struct bce_softc *, u32, u32);
static void bce_dump_enet		(struct bce_softc *, struct mbuf *);
static void bce_dump_mbuf		(struct bce_softc *, struct mbuf *);
static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
#ifdef BCE_JUMBO_HDRSPLIT
static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
#endif
static void bce_dump_txbd		(struct bce_softc *, int, struct tx_bd *);
static void bce_dump_rxbd		(struct bce_softc *, int, struct rx_bd *);
#ifdef BCE_JUMBO_HDRSPLIT
static void bce_dump_pgbd		(struct bce_softc *, int, struct rx_bd *);
#endif
static void bce_dump_l2fhdr		(struct bce_softc *, int, struct l2_fhdr *);
static void bce_dump_ctx		(struct bce_softc *, u16);
static void bce_dump_ftqs		(struct bce_softc *);
static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
static void bce_dump_rx_bd_chain	(struct bce_softc *, u16, int);
#ifdef BCE_JUMBO_HDRSPLIT
static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
#endif
static void bce_dump_status_block	(struct bce_softc *);
static void bce_dump_stats_block	(struct bce_softc *);
static void bce_dump_driver_state	(struct bce_softc *);
static void bce_dump_hw_state		(struct bce_softc *);
static void bce_dump_mq_regs		(struct bce_softc *);
static void bce_dump_bc_state		(struct bce_softc *);
static void bce_dump_txp_state		(struct bce_softc *, int);
static void bce_dump_rxp_state		(struct bce_softc *, int);
static void bce_dump_tpat_state		(struct bce_softc *, int);
static void bce_dump_cp_state		(struct bce_softc *, int);
static void bce_dump_com_state		(struct bce_softc *, int);
static void bce_dump_rv2p_state		(struct bce_softc *);
static void bce_breakpoint		(struct bce_softc *);
#endif


/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static u32  bce_reg_rd_ind		(struct bce_softc *, u32);
static void bce_reg_wr_ind		(struct bce_softc *, u32, u32);
static void bce_shmem_wr		(struct bce_softc *, u32, u32);
static u32  bce_shmem_rd		(struct bce_softc *, u32);
static void bce_ctx_wr			(struct bce_softc *, u32, u32, u32);
static int  bce_miibus_read_reg		(device_t, int, int);
static int  bce_miibus_write_reg	(device_t, int, int, int);
static void bce_miibus_statchg		(device_t);

#ifdef BCE_DEBUG
static int sysctl_nvram_dump(SYSCTL_HANDLER_ARGS);
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int sysctl_nvram_write(SYSCTL_HANDLER_ARGS);
#endif
#endif

/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int  bce_acquire_nvram_lock	(struct bce_softc *);
static int  bce_release_nvram_lock	(struct bce_softc *);
static void bce_enable_nvram_access	(struct bce_softc *);
static void bce_disable_nvram_access	(struct bce_softc *);
static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
static int  bce_init_nvram		(struct bce_softc *);
static int  bce_nvram_read		(struct bce_softc *, u32, u8 *, int);
static int  bce_nvram_test		(struct bce_softc *);
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int  bce_enable_nvram_write	(struct bce_softc *);
static void bce_disable_nvram_write	(struct bce_softc *);
static int  bce_nvram_erase_page	(struct bce_softc *, u32);
static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
static int  bce_nvram_write		(struct bce_softc *, u32, u8 *, int);
#endif

/****************************************************************************/
/* BCE Media and DMA Resource Routines                                      */
/****************************************************************************/
static void bce_get_media		(struct bce_softc *);
static void bce_init_media		(struct bce_softc *);
static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
static int  bce_dma_alloc		(device_t);
static void bce_dma_free		(struct bce_softc *);
static void bce_release_resources	(struct bce_softc *);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int  bce_fw_sync			(struct bce_softc *, u32);
static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
static void bce_load_cpu_fw		(struct bce_softc *,
    struct cpu_reg *, struct fw_info *);
static void bce_start_cpu		(struct bce_softc *, struct cpu_reg *);
static void bce_halt_cpu		(struct bce_softc *, struct cpu_reg *);
static void bce_start_rxp_cpu		(struct bce_softc *);
static void bce_init_rxp_cpu		(struct bce_softc *);
static void bce_init_txp_cpu		(struct bce_softc *);
static void bce_init_tpat_cpu		(struct bce_softc *);
static void bce_init_cp_cpu		(struct bce_softc *);
static void bce_init_com_cpu		(struct bce_softc *);
static void bce_init_cpus		(struct bce_softc *);

static void bce_print_adapter_info	(struct bce_softc *);
static void bce_probe_pci_caps		(device_t, struct bce_softc *);
static void bce_stop			(struct bce_softc *);
static int  bce_reset			(struct bce_softc *, u32);
static int  bce_chipinit		(struct bce_softc *);
static int  bce_blockinit		(struct bce_softc *);

static int  bce_init_tx_chain		(struct bce_softc *);
static void bce_free_tx_chain		(struct bce_softc *);

static int  bce_get_rx_buf		(struct bce_softc *,
    struct mbuf *, u16 *, u16 *, u32 *);
static int  bce_init_rx_chain		(struct bce_softc *);
static void bce_fill_rx_chain		(struct bce_softc *);
static void bce_free_rx_chain		(struct bce_softc *);

#ifdef BCE_JUMBO_HDRSPLIT
static int  bce_get_pg_buf		(struct bce_softc *,
    struct mbuf *, u16 *, u16 *);
static int  bce_init_pg_chain		(struct bce_softc *);
static void bce_fill_pg_chain		(struct bce_softc *);
static void bce_free_pg_chain		(struct bce_softc *);
#endif

static struct mbuf *bce_tso_setup	(struct bce_softc *,
    struct mbuf **, u16 *);
static int  bce_tx_encap		(struct bce_softc *, struct mbuf **);
static void bce_start_locked		(struct ifnet *);
static void bce_start			(struct ifnet *);
static int  bce_ioctl			(struct ifnet *, u_long, caddr_t);
static void bce_watchdog		(struct bce_softc *);
static int  bce_ifmedia_upd		(struct ifnet *);
static int  bce_ifmedia_upd_locked	(struct ifnet *);
static void bce_ifmedia_sts		(struct ifnet *, struct ifmediareq *);
static void bce_init_locked		(struct bce_softc *);
static void bce_init			(void *);
static void bce_mgmt_init_locked	(struct bce_softc *sc);

static int  bce_init_ctx		(struct bce_softc *);
static void bce_get_mac_addr		(struct bce_softc *);
static void bce_set_mac_addr		(struct bce_softc *);
static void bce_phy_intr		(struct bce_softc *);
static inline u16 bce_get_hw_rx_cons	(struct bce_softc *);
static void bce_rx_intr			(struct bce_softc *);
static void bce_tx_intr			(struct bce_softc *);
static void bce_disable_intr		(struct bce_softc *);
static void bce_enable_intr		(struct bce_softc *, int);

static void bce_intr			(void *);
static void bce_set_rx_mode		(struct bce_softc *);
static void bce_stats_update		(struct bce_softc *);
static void bce_tick			(void *);
static void bce_pulse			(void *);
static void bce_add_sysctls		(struct bce_softc *);


/****************************************************************************/
/* FreeBSD device dispatch table.                                           */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface (device_if.h) */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),
	/* Supported by device interface but not used here. */
/*	DEVMETHOD(device_identify,	bce_identify),      */
/*	DEVMETHOD(device_suspend,	bce_suspend),       */
/*	DEVMETHOD(device_resume,	bce_resume),        */
/*	DEVMETHOD(device_quiesce,	bce_quiesce),       */

	/* Bus interface (bus_if.h) */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (miibus_if.h) */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
	/* Supported by MII interface but not used here. */
/*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),    */
/*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit),  */

	{ 0, 0 }
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);


/****************************************************************************/
/* Tunable device values                                                    */
/****************************************************************************/
SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");

/* Allowable values are TRUE or FALSE */
static int bce_tso_enable = TRUE;
TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
    "TSO Enable/Disable");

/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
/* ToDo: Add MSI-X support. */
static int bce_msi_enable = 1;
TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
    "MSI-X|MSI|INTx selector");

/* ToDo: Add tunable to enable/disable strict MTU handling. */
/* Currently allows "loose" RX MTU checking (i.e. sets the  */
/* H/W RX MTU to the size of the largest receive buffer, or */
/* 2048 bytes). This will cause a UNH failure but is more   */
/* desireable from a functional perspective.                */


/****************************************************************************/
/* Device probe function.
*/ 529 /* */ 530 /* Compares the device to the driver's list of supported devices and */ 531 /* reports back to the OS whether this is the right driver for the device. */ 532 /* */ 533 /* Returns: */ 534 /* BUS_PROBE_DEFAULT on success, positive value on failure. */ 535 /****************************************************************************/ 536 static int 537 bce_probe(device_t dev) 538 { 539 struct bce_type *t; 540 struct bce_softc *sc; 541 char *descbuf; 542 u16 vid = 0, did = 0, svid = 0, sdid = 0; 543 544 t = bce_devs; 545 546 sc = device_get_softc(dev); 547 bzero(sc, sizeof(struct bce_softc)); 548 sc->bce_unit = device_get_unit(dev); 549 sc->bce_dev = dev; 550 551 /* Get the data for the device to be probed. */ 552 vid = pci_get_vendor(dev); 553 did = pci_get_device(dev); 554 svid = pci_get_subvendor(dev); 555 sdid = pci_get_subdevice(dev); 556 557 DBPRINT(sc, BCE_EXTREME_LOAD, 558 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 559 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 560 561 /* Look through the list of known devices for a match. */ 562 while(t->bce_name != NULL) { 563 564 if ((vid == t->bce_vid) && (did == t->bce_did) && 565 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && 566 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { 567 568 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 569 570 if (descbuf == NULL) 571 return(ENOMEM); 572 573 /* Print out the device identity. */ 574 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 575 t->bce_name, (((pci_read_config(dev, 576 PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), 577 (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); 578 579 device_set_desc_copy(dev, descbuf); 580 free(descbuf, M_TEMP); 581 return(BUS_PROBE_DEFAULT); 582 } 583 t++; 584 } 585 586 return(ENXIO); 587 } 588 589 590 /****************************************************************************/ 591 /* PCI Capabilities Probe Function. 
 */
/*                                                                          */
/* Prints the adapter's ASIC id/revision, bus type and speed, bootcode      */
/* version, and enabled feature flags to the console (bootverbose only).    */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	/* Number of feature flags printed so far; used to '|'-separate them. */
	int i = 0;

	DBENTER(BCE_VERBOSE_LOAD);

	if (bootverbose) {
		BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
		printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
		    12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));


		/* Bus info. */
		if (sc->bce_flags & BCE_PCIE_FLAG) {
			printf("Bus (PCIe x%d, ", sc->link_width);
			switch (sc->link_speed) {
			case 1: printf("2.5Gbps); "); break;
			case 2: printf("5Gbps); "); break;
			default: printf("Unknown link speed); ");
			}
		} else {
			printf("Bus (PCI%s, %s, %dMHz); ",
			    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
			    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
			    "32-bit" : "64-bit"), sc->bus_speed_mhz);
		}

		/* Firmware version and device features. */
		printf("B/C (%s); Flags (", sc->bce_bc_ver);

#ifdef BCE_JUMBO_HDRSPLIT
		printf("SPLT");
		i++;
#endif

		if (sc->bce_flags & BCE_USING_MSI_FLAG) {
			if (i > 0) printf("|");
			printf("MSI"); i++;
		}

		if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
			if (i > 0) printf("|");
			printf("MSI-X"); i++;
		}

		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
			if (i > 0) printf("|");
			printf("2.5G"); i++;
		}

		/* Management firmware, if enabled, closes the flag list. */
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
			if (i > 0) printf("|");
			printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
		} else {
			printf(")\n");
		}
	}

	DBEXIT(BCE_VERBOSE_LOAD);
}


/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features are */
/* supported.
*/ 667 /* */ 668 /* Returns: */ 669 /* None. */ 670 /****************************************************************************/ 671 static void 672 bce_probe_pci_caps(device_t dev, struct bce_softc *sc) 673 { 674 u32 reg; 675 676 DBENTER(BCE_VERBOSE_LOAD); 677 678 /* Check if PCI-X capability is enabled. */ 679 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 680 if (reg != 0) 681 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; 682 } 683 684 /* Check if PCIe capability is enabled. */ 685 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 686 if (reg != 0) { 687 u16 link_status = pci_read_config(dev, reg + 0x12, 2); 688 DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = " 689 "0x%08X\n", link_status); 690 sc->link_speed = link_status & 0xf; 691 sc->link_width = (link_status >> 4) & 0x3f; 692 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; 693 sc->bce_flags |= BCE_PCIE_FLAG; 694 } 695 } 696 697 /* Check if MSI capability is enabled. */ 698 if (pci_find_extcap(dev, PCIY_MSI, ®) == 0) { 699 if (reg != 0) 700 sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG; 701 } 702 703 /* Check if MSI-X capability is enabled. */ 704 if (pci_find_extcap(dev, PCIY_MSIX, ®) == 0) { 705 if (reg != 0) 706 sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG; 707 } 708 709 DBEXIT(BCE_VERBOSE_LOAD); 710 } 711 712 713 /****************************************************************************/ 714 /* Device attach function. */ 715 /* */ 716 /* Allocates device resources, performs secondary chip identification, */ 717 /* resets and initializes the hardware, and initializes driver instance */ 718 /* variables. */ 719 /* */ 720 /* Returns: */ 721 /* 0 on success, positive value on failure. 
*/
/****************************************************************************/
static int
bce_attach(device_t dev)
{
    struct bce_softc *sc;
    struct ifnet *ifp;
    u32 val;
    int error, rid, rc = 0;

    sc = device_get_softc(dev);
    sc->bce_dev = dev;

    DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

    sc->bce_unit = device_get_unit(dev);

    /* Set initial device and PHY flags */
    sc->bce_flags = 0;
    sc->bce_phy_flags = 0;

    pci_enable_busmaster(dev);

    /* Allocate PCI memory resources (BAR 0 holds the register window). */
    rid = PCIR_BAR(0);
    sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (sc->bce_res_mem == NULL) {
        BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Get various resource handles. */
    sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
    sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
    sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);

    bce_probe_pci_caps(dev, sc);

    rid = 1;
#if 0
    /* Try allocating MSI-X interrupts. */
    if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
        (bce_msi_enable >= 2) &&
        ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE)) != NULL)) {

        msi_needed = sc->bce_msi_count = 1;

        if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
            (sc->bce_msi_count != msi_needed)) {
            BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
                "Received = %d, error = %d\n", __FILE__, __LINE__,
                msi_needed, sc->bce_msi_count, error);
            sc->bce_msi_count = 0;
            pci_release_msi(dev);
            bus_release_resource(dev, SYS_RES_MEMORY, rid,
                sc->bce_res_irq);
            sc->bce_res_irq = NULL;
        } else {
            DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
                __FUNCTION__);
            sc->bce_flags |= BCE_USING_MSIX_FLAG;
            sc->bce_intr = bce_intr;
        }
    }
#endif

    /* Try allocating a MSI interrupt. */
    if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
        (bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
        sc->bce_msi_count = 1;
        if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
            BCE_PRINTF("%s(%d): MSI allocation failed! "
                "error = %d\n", __FILE__, __LINE__, error);
            sc->bce_msi_count = 0;
            pci_release_msi(dev);
        } else {
            DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
                "interrupt.\n", __FUNCTION__);
            sc->bce_flags |= BCE_USING_MSI_FLAG;
            /* 5709/5716 support one-shot MSI mode. */
            if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
                (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
                sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
            sc->bce_irq_rid = 1;
            sc->bce_intr = bce_intr;
        }
    }

    /* Try allocating a legacy interrupt. */
    if (sc->bce_msi_count == 0) {
        DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
            __FUNCTION__);
        rid = 0;
        sc->bce_intr = bce_intr;
    }

    sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
        &rid, RF_SHAREABLE | RF_ACTIVE);

    sc->bce_irq_rid = rid;

    /* Report any IRQ allocation errors. */
    if (sc->bce_res_irq == NULL) {
        BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Initialize mutex for the current device instance. */
    BCE_LOCK_INIT(sc, device_get_nameunit(dev));

    /*
     * Configure byte swap and enable indirect register access.
     * Rely on CPU to do target byte swapping on big endian systems.
     * Access to registers outside of PCI configuration space are not
     * valid until this is done.
     */
    pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
        BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
        BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

    /* Save ASIC revision info. */
    sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

    /* Weed out any non-production controller revisions. */
    switch(BCE_CHIP_ID(sc)) {
    case BCE_CHIP_ID_5706_A0:
    case BCE_CHIP_ID_5706_A1:
    case BCE_CHIP_ID_5708_A0:
    case BCE_CHIP_ID_5708_B0:
    case BCE_CHIP_ID_5709_A0:
    case BCE_CHIP_ID_5709_B0:
    case BCE_CHIP_ID_5709_B1:
    case BCE_CHIP_ID_5709_B2:
        /* Decode PCI revision ID into letter+digit form (e.g. "A0"). */
        BCE_PRINTF("%s(%d): Unsupported controller "
            "revision (%c%d)!\n", __FILE__, __LINE__,
            (((pci_read_config(dev, PCIR_REVID, 4) &
            0xf0) >> 4) + 'A'), (pci_read_config(dev,
            PCIR_REVID, 4) & 0xf));
        rc = ENODEV;
        goto bce_attach_fail;
    }

    /*
     * The embedded PCIe to PCI-X bridge (EPB)
     * in the 5708 cannot address memory above
     * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
     */
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
        sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
    else
        sc->max_bus_addr = BUS_SPACE_MAXADDR;

    /*
     * Find the base address for shared memory access.
     * Newer versions of bootcode use a signature and offset
     * while older versions use a fixed address.
     */
    val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
    if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
        /* Multi-port devices use different offsets in shared memory. */
        sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
            (pci_get_function(sc->bce_dev) << 2));
    else
        sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;

    DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
        __FUNCTION__, sc->bce_shmem_base);

    /*
     * Fetch the bootcode revision.  The three most significant bytes of
     * the shared-memory word are converted to a dotted decimal string
     * (e.g. "1.9.6"); the inner loop is a leading-zero-suppressed
     * binary-to-decimal conversion.
     */
    val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
    for (int i = 0, j = 0; i < 3; i++) {
        u8 num;

        num = (u8) (val >> (24 - (i * 8)));
        for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
            if (num >= k || !skip0 || k == 1) {
                sc->bce_bc_ver[j++] = (num / k) + '0';
                skip0 = 0;
            }
        }

        if (i != 2)
            sc->bce_bc_ver[j++] = '.';
    }

    /* Check if any management firmware is enabled. */
    val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
    if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
        sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

        /* Allow time for firmware to enter the running state. */
        for (int i = 0; i < 30; i++) {
            val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
            if (val & BCE_CONDITION_MFW_RUN_MASK)
                break;
            DELAY(10000);
        }

        /* Check if management firmware is running. */
        val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
        val &= BCE_CONDITION_MFW_RUN_MASK;
        if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
            (val != BCE_CONDITION_MFW_RUN_NONE)) {
            u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
            int i = 0;

            /* Read the management firmware version string
             * (three 32-bit words, byte-swapped into the buffer). */
            for (int j = 0; j < 3; j++) {
                val = bce_reg_rd_ind(sc, addr + j * 4);
                val = bswap32(val);
                memcpy(&sc->bce_mfw_ver[i], &val, 4);
                i += 4;
            }
        } else {
            /* May cause firmware synchronization timeouts. */
            BCE_PRINTF("%s(%d): Management firmware enabled "
                "but not running!\n", __FILE__, __LINE__);
            strcpy(sc->bce_mfw_ver, "NOT RUNNING!");

            /* ToDo: Any action the driver should take? */
        }
    }

    /* Get PCI bus information (speed and type). */
    val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
    if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
        u32 clkreg;

        sc->bce_flags |= BCE_PCIX_FLAG;

        clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);

        clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
        switch (clkreg) {
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
            sc->bus_speed_mhz = 133;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
            sc->bus_speed_mhz = 100;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
            sc->bus_speed_mhz = 66;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
            sc->bus_speed_mhz = 50;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
            sc->bus_speed_mhz = 33;
            break;
        }
    } else {
        if (val & BCE_PCICFG_MISC_STATUS_M66EN)
            sc->bus_speed_mhz = 66;
        else
            sc->bus_speed_mhz = 33;
    }

    if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
        sc->bce_flags |= BCE_PCI_32BIT_FLAG;

    /* Reset controller and announce to bootcode that driver is present. */
    if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
        BCE_PRINTF("%s(%d): Controller reset failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Initialize the controller. */
    if (bce_chipinit(sc)) {
        BCE_PRINTF("%s(%d): Controller initialization failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Perform NVRAM test. */
    if (bce_nvram_test(sc)) {
        BCE_PRINTF("%s(%d): NVRAM test failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Fetch the permanent Ethernet MAC address. */
    bce_get_mac_addr(sc);

    /*
     * Trip points control how many BDs
     * should be ready before generating an
     * interrupt while ticks control how long
     * a BD can sit in the chain before
     * generating an interrupt.  Set the default
     * values for the RX and TX chains.
     */

#ifdef BCE_DEBUG
    /* Force more frequent interrupts. */
    sc->bce_tx_quick_cons_trip_int = 1;
    sc->bce_tx_quick_cons_trip = 1;
    sc->bce_tx_ticks_int = 0;
    sc->bce_tx_ticks = 0;

    sc->bce_rx_quick_cons_trip_int = 1;
    sc->bce_rx_quick_cons_trip = 1;
    sc->bce_rx_ticks_int = 0;
    sc->bce_rx_ticks = 0;
#else
    /* Improve throughput at the expense of increased latency. */
    sc->bce_tx_quick_cons_trip_int = 20;
    sc->bce_tx_quick_cons_trip = 20;
    sc->bce_tx_ticks_int = 80;
    sc->bce_tx_ticks = 80;

    sc->bce_rx_quick_cons_trip_int = 6;
    sc->bce_rx_quick_cons_trip = 6;
    sc->bce_rx_ticks_int = 18;
    sc->bce_rx_ticks = 18;
#endif

    /* Not used for L2. */
    sc->bce_comp_prod_trip_int = 0;
    sc->bce_comp_prod_trip = 0;
    sc->bce_com_ticks_int = 0;
    sc->bce_com_ticks = 0;
    sc->bce_cmd_ticks_int = 0;
    sc->bce_cmd_ticks = 0;

    /* Update statistics once every second.
     * (The mask keeps the value within the field width the hardware
     * accepts — presumably the low byte is reserved; confirm against
     * the register definition.) */
    sc->bce_stats_ticks = 1000000 & 0xffff00;

    /* Find the media type for the adapter. */
    bce_get_media(sc);

    /* Store data needed by PHY driver for backplane applications */
    sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
    sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);

    /* Allocate DMA memory resources. */
    if (bce_dma_alloc(dev)) {
        BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Allocate an ifnet structure. */
    ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        BCE_PRINTF("%s(%d): Interface allocation failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Initialize the ifnet interface. */
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = bce_ioctl;
    ifp->if_start = bce_start;
    ifp->if_init = bce_init;
    ifp->if_mtu = ETHERMTU;

    if (bce_tso_enable) {
        ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
        ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 |
            IFCAP_VLAN_HWTSO;
    } else {
        ifp->if_hwassist = BCE_IF_HWASSIST;
        ifp->if_capabilities = BCE_IF_CAPABILITIES;
    }

    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Assume standard mbuf sizes for buffer allocation.
     * This may change later if the MTU size is set to
     * something other than 1500.
     */
#ifdef BCE_JUMBO_HDRSPLIT
    sc->rx_bd_mbuf_alloc_size = MHLEN;
    /* Make sure offset is 16 byte aligned for hardware. */
    sc->rx_bd_mbuf_align_pad =
        roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
    sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
        sc->rx_bd_mbuf_align_pad;
    sc->pg_bd_mbuf_alloc_size = MCLBYTES;
#else
    sc->rx_bd_mbuf_alloc_size = MCLBYTES;
    sc->rx_bd_mbuf_align_pad =
        roundup2(MCLBYTES, 16) - MCLBYTES;
    sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
        sc->rx_bd_mbuf_align_pad;
#endif

    ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
    IFQ_SET_READY(&ifp->if_snd);

    if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
        ifp->if_baudrate = IF_Mbps(2500ULL);
    else
        ifp->if_baudrate = IF_Mbps(1000);

    /* Handle any special PHY initialization for SerDes PHYs. */
    bce_init_media(sc);

    /* Create the MII child bus by attaching the PHY. */
    rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
        bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
        MII_OFFSET_ANY, 0);
    if (rc != 0) {
        BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
            __LINE__);
        goto bce_attach_fail;
    }

    /* Attach to the Ethernet interface list. */
    ether_ifattach(ifp, sc->eaddr);

#if __FreeBSD_version < 500000
    callout_init(&sc->bce_tick_callout);
    callout_init(&sc->bce_pulse_callout);
#else
    callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
    callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
#endif

    /* Hookup IRQ last. */
    rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, bce_intr, sc, &sc->bce_intrhand);

    if (rc) {
        BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
            __FILE__, __LINE__);
        /* ether_ifattach() already ran, so tear down via full detach. */
        bce_detach(dev);
        goto bce_attach_exit;
    }

    /*
     * At this point we've acquired all the resources
     * we need to run so there's no turning back, we're
     * cleared for launch.
     */

    /* Print some important debugging info. */
    DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));

    /* Add the supported sysctls to the kernel. */
    bce_add_sysctls(sc);

    BCE_LOCK(sc);

    /*
     * The chip reset earlier notified the bootcode that
     * a driver is present.  We now need to start our pulse
     * routine so that the bootcode is reminded that we're
     * still running.
     */
    bce_pulse(sc);

    bce_mgmt_init_locked(sc);
    BCE_UNLOCK(sc);

    /* Finally, print some useful adapter info */
    bce_print_adapter_info(sc);
    DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
        __FUNCTION__, sc);

    goto bce_attach_exit;

bce_attach_fail:
    bce_release_resources(sc);

bce_attach_exit:

    DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

    return(rc);
}


/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
    struct bce_softc *sc = device_get_softc(dev);
    struct ifnet *ifp;
    u32 msg;

    DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);

    ifp = sc->bce_ifp;

    /* Stop and reset the controller. */
    BCE_LOCK(sc);

    /* Stop the pulse so the bootcode can go to driver absent state. */
    callout_stop(&sc->bce_pulse_callout);

    bce_stop(sc);
    /* Tell the bootcode whether WOL remains armed across the unload. */
    if (sc->bce_flags & BCE_NO_WOL_FLAG)
        msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
    else
        msg = BCE_DRV_MSG_CODE_UNLOAD;
    bce_reset(sc, msg);

    BCE_UNLOCK(sc);

    ether_ifdetach(ifp);

    /* If we have a child device on the MII bus remove it too. */
    bus_generic_detach(dev);
    device_delete_child(dev, sc->bce_miibus);

    /* Release all remaining resources. */
    bce_release_resources(sc);

    DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);

    return(0);
}


/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_shutdown(device_t dev)
{
    struct bce_softc *sc = device_get_softc(dev);
    u32 msg;

    DBENTER(BCE_VERBOSE);

    BCE_LOCK(sc);
    bce_stop(sc);
    /* Same WOL decision as bce_detach(). */
    if (sc->bce_flags & BCE_NO_WOL_FLAG)
        msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
    else
        msg = BCE_DRV_MSG_CODE_UNLOAD;
    bce_reset(sc, msg);
    BCE_UNLOCK(sc);

    DBEXIT(BCE_VERBOSE);

    return (0);
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Register read.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.
*/
/****************************************************************************/
static u32
bce_reg_rd(struct bce_softc *sc, u32 offset)
{
    /* Debug-build wrapper: memory-mapped read plus a trace line. */
    u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
    DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
        __FUNCTION__, offset, val);
    return val;
}


/****************************************************************************/
/* Register write (16 bit).                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
{
    /* Trace first, then perform the 16-bit memory-mapped write. */
    DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
        __FUNCTION__, offset, val);
    bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
}


/****************************************************************************/
/* Register write.                                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
{
    /* Trace first, then perform the 32-bit memory-mapped write. */
    DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
        __FUNCTION__, offset, val);
    bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
}
#endif

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI    */
/* configuration space.  Using this mechanism avoids issues with posted     */
/* reads but is much slower than memory-mapped I/O.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.
*/
/****************************************************************************/
static u32
bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
{
    device_t dev;
    dev = sc->bce_dev;

    /* Latch the target register address into the window register, then
     * read the data register.  The debug build captures the value so it
     * can be traced before returning. */
    pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
#ifdef BCE_DEBUG
    {
        u32 val;
        val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
        DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
            __FUNCTION__, offset, val);
        return val;
    }
#else
    return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
#endif
}


/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
{
    device_t dev;
    dev = sc->bce_dev;

    DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
        __FUNCTION__, offset, val);

    /* Address first, then data — the pair must be written in this order. */
    pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
    pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}


/****************************************************************************/
/* Shared memory write.                                                     */
/*                                                                          */
/* Writes NetXtreme II shared memory region.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 1407 /****************************************************************************/ 1408 static void 1409 bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val) 1410 { 1411 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to " 1412 "0x%08X\n", __FUNCTION__, val, offset); 1413 1414 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1415 } 1416 1417 1418 /****************************************************************************/ 1419 /* Shared memory read. */ 1420 /* */ 1421 /* Reads NetXtreme II shared memory region. */ 1422 /* */ 1423 /* Returns: */ 1424 /* The 32 bit value read. */ 1425 /****************************************************************************/ 1426 static u32 1427 bce_shmem_rd(struct bce_softc *sc, u32 offset) 1428 { 1429 u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1430 1431 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from " 1432 "0x%08X\n", __FUNCTION__, val, offset); 1433 1434 return val; 1435 } 1436 1437 1438 #ifdef BCE_DEBUG 1439 /****************************************************************************/ 1440 /* Context memory read. */ 1441 /* */ 1442 /* The NetXtreme II controller uses context memory to track connection */ 1443 /* information for L2 and higher network protocols. */ 1444 /* */ 1445 /* Returns: */ 1446 /* The requested 32 bit value of context memory. 
*/
/****************************************************************************/
static u32
bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
{
    u32 idx, offset, retry_cnt = 5, val;

    /* Debug-only sanity check: CID must be in range, aligned, and the
     * offset must be 32-bit aligned. */
    DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 ||
        cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID "
        "address: 0x%08X.\n", __FUNCTION__, cid_addr));

    offset = ctx_offset + cid_addr;

    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {

        /* 5709/5716: post a read request and poll for completion. */
        REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));

        for (idx = 0; idx < retry_cnt; idx++) {
            val = REG_RD(sc, BCE_CTX_CTX_CTRL);
            if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
                break;
            DELAY(5);
        }

        /* REQ bit still set after the retries means the read timed out;
         * the data register is read anyway and may be stale. */
        if (val & BCE_CTX_CTX_CTRL_READ_REQ)
            BCE_PRINTF("%s(%d); Unable to read CTX memory: "
                "cid_addr = 0x%08X, offset = 0x%08X!\n",
                __FILE__, __LINE__, cid_addr, ctx_offset);

        val = REG_RD(sc, BCE_CTX_CTX_DATA);
    } else {
        /* Older chips: simple address/data register pair. */
        REG_WR(sc, BCE_CTX_DATA_ADR, offset);
        val = REG_RD(sc, BCE_CTX_DATA);
    }

    DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
        "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);

    return(val);
}
#endif


/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection      */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
{
    u32 idx, offset = ctx_offset + cid_addr;
    u32 val, retry_cnt = 5;

    DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
        "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);

    /* Debug-only sanity check on CID range and alignment. */
    DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
        BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
        __FUNCTION__, cid_addr));

    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {

        /* 5709/5716: stage the data, post the write request, and
         * poll until the hardware clears the request bit. */
        REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
        REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

        for (idx = 0; idx < retry_cnt; idx++) {
            val = REG_RD(sc, BCE_CTX_CTX_CTRL);
            if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
                break;
            DELAY(5);
        }

        if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
            BCE_PRINTF("%s(%d); Unable to write CTX memory: "
                "cid_addr = 0x%08X, offset = 0x%08X!\n",
                __FILE__, __LINE__, cid_addr, ctx_offset);

    } else {
        /* Older chips: simple address/data register pair. */
        REG_WR(sc, BCE_CTX_DATA_ADR, offset);
        REG_WR(sc, BCE_CTX_DATA, ctx_val);
    }
}


/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
    struct bce_softc *sc;
    u32 val;
    int i;

    sc = device_get_softc(dev);

    /* Make sure we are accessing the correct PHY address. */
    if (phy != sc->bce_phy_addr) {
        DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
            "for PHY read!\n", phy);
        return(0);
    }

    /*
     * The 5709S PHY is an IEEE Clause 45 PHY
     * with special mappings to work with IEEE
     * Clause 22 register accesses.
     */
    if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
        if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
            reg += 0x10;
    }

    /* Temporarily disable hardware auto-polling of the PHY while the
     * host drives the MDIO interface.  The readback after the write
     * is presumably there to flush the posted write — confirm. */
    if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
        val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
        REG_RD(sc, BCE_EMAC_MDIO_MODE);

        DELAY(40);
    }

    /* Start the read and poll for completion (BUSY bit clears). */
    val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
        BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
        BCE_EMAC_MDIO_COMM_START_BUSY;
    REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

    for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
        DELAY(10);

        val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
        if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
            DELAY(5);

            val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
            val &= BCE_EMAC_MDIO_COMM_DATA;

            break;
        }
    }

    if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
        BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, "
            "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
        val = 0x0;
    } else {
        /* NOTE(review): re-reads COMM rather than using the masked
         * value captured in the loop — matches the original code;
         * the final return masks to 16 bits either way. */
        val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
    }

    /* Restore hardware auto-polling if it was enabled on entry. */
    if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
        val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
        REG_RD(sc, BCE_EMAC_MDIO_MODE);

        DELAY(40);
    }

    DB_PRINT_PHY_REG(reg, val);
    return (val & 0xffff);

}


/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.
*/
/*                                                                          */
/* Returns:                                                                 */
/*   0 (always).                                                            */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
    struct bce_softc *sc;
    u32 mdio;
    int timeout;

    sc = device_get_softc(dev);

    /* Ignore writes aimed at any PHY address other than ours. */
    if (phy != sc->bce_phy_addr) {
        DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
            "for PHY write!\n", phy);
        return(0);
    }

    DB_PRINT_PHY_REG(reg, val);

    /*
     * The 5709S PHY is an IEEE Clause 45 PHY
     * with special mappings to work with IEEE
     * Clause 22 register accesses.
     */
    if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
        if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
            reg += 0x10;
    }

    /* Pause hardware auto-polling while the host owns the MDIO bus. */
    if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
        mdio = REG_RD(sc, BCE_EMAC_MDIO_MODE);
        mdio &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(sc, BCE_EMAC_MDIO_MODE, mdio);
        REG_RD(sc, BCE_EMAC_MDIO_MODE);

        DELAY(40);
    }

    /* Kick off the write, then poll until the BUSY bit clears. */
    mdio = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
        BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
        BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
    REG_WR(sc, BCE_EMAC_MDIO_COMM, mdio);

    for (timeout = 0; timeout < BCE_PHY_TIMEOUT; timeout++) {
        DELAY(10);

        mdio = REG_RD(sc, BCE_EMAC_MDIO_COMM);
        if (!(mdio & BCE_EMAC_MDIO_COMM_START_BUSY)) {
            DELAY(5);
            break;
        }
    }

    if (mdio & BCE_EMAC_MDIO_COMM_START_BUSY)
        BCE_PRINTF("%s(%d): PHY write timeout!\n",
            __FILE__, __LINE__);

    /* Restore hardware auto-polling if it was enabled on entry. */
    if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
        mdio = REG_RD(sc, BCE_EMAC_MDIO_MODE);
        mdio |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(sc, BCE_EMAC_MDIO_MODE, mdio);
        REG_RD(sc, BCE_EMAC_MDIO_MODE);

        DELAY(40);
    }

    return 0;
}


/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the    */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
    struct bce_softc *sc;
    struct mii_data *mii;
    int val;

    sc = device_get_softc(dev);

    DBENTER(BCE_VERBOSE_PHY);

    mii = device_get_softc(sc->bce_miibus);

    /* Clear all mode bits this routine may set before re-deriving them. */
    val = REG_RD(sc, BCE_EMAC_MODE);
    val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
        BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
        BCE_EMAC_MODE_25G);

    /* Set MII or GMII interface based on the PHY speed.
     * Note the deliberate fall-throughs: 10Mb on a 5706 uses the plain
     * MII setting, and 2.5G additionally sets the GMII port bits. */
    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_10_T:
        if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
            DBPRINT(sc, BCE_INFO_PHY,
                "Enabling 10Mb interface.\n");
            val |= BCE_EMAC_MODE_PORT_MII_10;
            break;
        }
        /* fall-through */
    case IFM_100_TX:
        DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
        val |= BCE_EMAC_MODE_PORT_MII;
        break;
    case IFM_2500_SX:
        DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
        val |= BCE_EMAC_MODE_25G;
        /* fall-through */
    case IFM_1000_T:
    case IFM_1000_SX:
        DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
        val |= BCE_EMAC_MODE_PORT_GMII;
        break;
    default:
        DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
            "default GMII interface.\n");
        val |= BCE_EMAC_MODE_PORT_GMII;
    }

    /* Set half or full duplex based on PHY settings. */
    if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
        DBPRINT(sc, BCE_INFO_PHY,
            "Setting Half-Duplex interface.\n");
        val |= BCE_EMAC_MODE_HALF_DUPLEX;
    } else
        DBPRINT(sc, BCE_INFO_PHY,
            "Setting Full-Duplex interface.\n");

    REG_WR(sc, BCE_EMAC_MODE, val);

    /* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
    if (mii->mii_media_active & IFM_FLAG0) {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Enabling RX flow control.\n", __FUNCTION__);
        BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
    } else {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Disabling RX flow control.\n", __FUNCTION__);
        BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
    }

    if (mii->mii_media_active & IFM_FLAG1) {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Enabling TX flow control.\n", __FUNCTION__);
        BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
        sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
    } else {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Disabling TX flow control.\n", __FUNCTION__);
        BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
        sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
    }

    /* ToDo: Update watermarks in bce_init_rx_context(). */

    DBEXIT(BCE_VERBOSE_PHY);
}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
/* for use by the driver.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 1810 /****************************************************************************/ 1811 static int 1812 bce_acquire_nvram_lock(struct bce_softc *sc) 1813 { 1814 u32 val; 1815 int j, rc = 0; 1816 1817 DBENTER(BCE_VERBOSE_NVRAM); 1818 1819 /* Request access to the flash interface. */ 1820 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1821 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1822 val = REG_RD(sc, BCE_NVM_SW_ARB); 1823 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1824 break; 1825 1826 DELAY(5); 1827 } 1828 1829 if (j >= NVRAM_TIMEOUT_COUNT) { 1830 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1831 rc = EBUSY; 1832 } 1833 1834 DBEXIT(BCE_VERBOSE_NVRAM); 1835 return (rc); 1836 } 1837 1838 1839 /****************************************************************************/ 1840 /* Release NVRAM lock. */ 1841 /* */ 1842 /* When the caller is finished accessing NVRAM the lock must be released. */ 1843 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1844 /* for use by the driver. */ 1845 /* */ 1846 /* Returns: */ 1847 /* 0 on success, positive value on failure. */ 1848 /****************************************************************************/ 1849 static int 1850 bce_release_nvram_lock(struct bce_softc *sc) 1851 { 1852 u32 val; 1853 int j, rc = 0; 1854 1855 DBENTER(BCE_VERBOSE_NVRAM); 1856 1857 /* 1858 * Relinquish nvram interface. 1859 */ 1860 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1861 1862 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1863 val = REG_RD(sc, BCE_NVM_SW_ARB); 1864 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1865 break; 1866 1867 DELAY(5); 1868 } 1869 1870 if (j >= NVRAM_TIMEOUT_COUNT) { 1871 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n"); 1872 rc = EBUSY; 1873 } 1874 1875 DBEXIT(BCE_VERBOSE_NVRAM); 1876 return (rc); 1877 } 1878 1879 1880 #ifdef BCE_NVRAM_WRITE_SUPPORT 1881 /****************************************************************************/ 1882 /* Enable NVRAM write access. 
*/ 1883 /* */ 1884 /* Before writing to NVRAM the caller must enable NVRAM writes. */ 1885 /* */ 1886 /* Returns: */ 1887 /* 0 on success, positive value on failure. */ 1888 /****************************************************************************/ 1889 static int 1890 bce_enable_nvram_write(struct bce_softc *sc) 1891 { 1892 u32 val; 1893 int rc = 0; 1894 1895 DBENTER(BCE_VERBOSE_NVRAM); 1896 1897 val = REG_RD(sc, BCE_MISC_CFG); 1898 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI); 1899 1900 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 1901 int j; 1902 1903 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1904 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT); 1905 1906 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1907 DELAY(5); 1908 1909 val = REG_RD(sc, BCE_NVM_COMMAND); 1910 if (val & BCE_NVM_COMMAND_DONE) 1911 break; 1912 } 1913 1914 if (j >= NVRAM_TIMEOUT_COUNT) { 1915 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n"); 1916 rc = EBUSY; 1917 } 1918 } 1919 1920 DBENTER(BCE_VERBOSE_NVRAM); 1921 return (rc); 1922 } 1923 1924 1925 /****************************************************************************/ 1926 /* Disable NVRAM write access. */ 1927 /* */ 1928 /* When the caller is finished writing to NVRAM write access must be */ 1929 /* disabled. */ 1930 /* */ 1931 /* Returns: */ 1932 /* Nothing. */ 1933 /****************************************************************************/ 1934 static void 1935 bce_disable_nvram_write(struct bce_softc *sc) 1936 { 1937 u32 val; 1938 1939 DBENTER(BCE_VERBOSE_NVRAM); 1940 1941 val = REG_RD(sc, BCE_MISC_CFG); 1942 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN); 1943 1944 DBEXIT(BCE_VERBOSE_NVRAM); 1945 1946 } 1947 #endif 1948 1949 1950 /****************************************************************************/ 1951 /* Enable NVRAM access. */ 1952 /* */ 1953 /* Before accessing NVRAM for read or write operations the caller must */ 1954 /* enabled NVRAM access. 
*/
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_NVRAM);

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val |
	    BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);

	DBEXIT(BCE_VERBOSE_NVRAM);
}


/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_NVRAM);

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val &
	    ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));

	DBEXIT(BCE_VERBOSE_NVRAM);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
		goto bce_nvram_erase_page_exit;

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	    BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion (DONE bit, polled every 5us). */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		rc = EBUSY;
	}

bce_nvram_erase_page_exit:
	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc,
    u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	DBENTER(BCE_EXTREME_NVRAM);

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion, then fetch the data word. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* Convert from big-endian NVRAM byte order. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	DBEXIT(BCE_EXTREME_NVRAM);
	return(rc);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
    u32 cmd_flags)
{
	u32 cmd, val32;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion (DONE bit, polled every 5us). */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* 5709/5716 controllers use a fixed flash layout; skip probing. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	/* NOTE(review): 0x40000000 is the "reconfigured by bootcode" flag in
	 * NVM_CFG1 -- confirm against the register definition header. */
	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BCE_INFO_LOAD,
		    "bce_init_nvram(): Flash WAS reconfigured.\n");

		/* Match on the backup strapping bits only. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u32 mask;

		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
		    __FUNCTION__);

		/* NOTE(review): bit 23 presumably selects between primary and
		 * backup strap sets -- confirm against the NVM_CFG1 spec. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {

			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
					return rc;

				/* Reconfigure the flash interface for the
				 * matched device, then drop access/lock. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		DBEXIT(BCE_VERBOSE_NVRAM);
		return (ENODEV);
	}

bce_init_nvram_get_flash_size:
	/* Read the NVRAM size advertised through the shared memory
	 * interface; fall back to the flash table's total size if the
	 * bootcode reports zero.  (The original comment said "Write",
	 * but bce_shmem_rd() is a read.) */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
	    __FUNCTION__, sc->bce_flash_info->name,
	    sc->bce_flash_info->total_size);

	DBEXIT(BCE_VERBOSE_NVRAM);
	return rc;
}


/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.
*/ 2304 /****************************************************************************/ 2305 static int 2306 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf, 2307 int buf_size) 2308 { 2309 int rc = 0; 2310 u32 cmd_flags, offset32, len32, extra; 2311 2312 DBENTER(BCE_VERBOSE_NVRAM); 2313 2314 if (buf_size == 0) 2315 goto bce_nvram_read_exit; 2316 2317 /* Request access to the flash interface. */ 2318 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2319 goto bce_nvram_read_exit; 2320 2321 /* Enable access to flash interface */ 2322 bce_enable_nvram_access(sc); 2323 2324 len32 = buf_size; 2325 offset32 = offset; 2326 extra = 0; 2327 2328 cmd_flags = 0; 2329 2330 if (offset32 & 3) { 2331 u8 buf[4]; 2332 u32 pre_len; 2333 2334 offset32 &= ~3; 2335 pre_len = 4 - (offset & 3); 2336 2337 if (pre_len >= len32) { 2338 pre_len = len32; 2339 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 2340 } 2341 else { 2342 cmd_flags = BCE_NVM_COMMAND_FIRST; 2343 } 2344 2345 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2346 2347 if (rc) 2348 return rc; 2349 2350 memcpy(ret_buf, buf + (offset & 3), pre_len); 2351 2352 offset32 += 4; 2353 ret_buf += pre_len; 2354 len32 -= pre_len; 2355 } 2356 2357 if (len32 & 3) { 2358 extra = 4 - (len32 & 3); 2359 len32 = (len32 + 4) & ~3; 2360 } 2361 2362 if (len32 == 4) { 2363 u8 buf[4]; 2364 2365 if (cmd_flags) 2366 cmd_flags = BCE_NVM_COMMAND_LAST; 2367 else 2368 cmd_flags = BCE_NVM_COMMAND_FIRST | 2369 BCE_NVM_COMMAND_LAST; 2370 2371 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2372 2373 memcpy(ret_buf, buf, 4 - extra); 2374 } 2375 else if (len32 > 0) { 2376 u8 buf[4]; 2377 2378 /* Read the first word. */ 2379 if (cmd_flags) 2380 cmd_flags = 0; 2381 else 2382 cmd_flags = BCE_NVM_COMMAND_FIRST; 2383 2384 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 2385 2386 /* Advance to the next dword. 
*/ 2387 offset32 += 4; 2388 ret_buf += 4; 2389 len32 -= 4; 2390 2391 while (len32 > 4 && rc == 0) { 2392 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 2393 2394 /* Advance to the next dword. */ 2395 offset32 += 4; 2396 ret_buf += 4; 2397 len32 -= 4; 2398 } 2399 2400 if (rc) 2401 goto bce_nvram_read_locked_exit; 2402 2403 cmd_flags = BCE_NVM_COMMAND_LAST; 2404 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2405 2406 memcpy(ret_buf, buf, 4 - extra); 2407 } 2408 2409 bce_nvram_read_locked_exit: 2410 /* Disable access to flash interface and release the lock. */ 2411 bce_disable_nvram_access(sc); 2412 bce_release_nvram_lock(sc); 2413 2414 bce_nvram_read_exit: 2415 DBEXIT(BCE_VERBOSE_NVRAM); 2416 return rc; 2417 } 2418 2419 2420 #ifdef BCE_NVRAM_WRITE_SUPPORT 2421 /****************************************************************************/ 2422 /* Write an arbitrary range of data from NVRAM. */ 2423 /* */ 2424 /* Prepares the NVRAM interface for write access and writes the requested */ 2425 /* data from the supplied buffer. The caller is responsible for */ 2426 /* calculating any appropriate CRCs. */ 2427 /* */ 2428 /* Returns: */ 2429 /* 0 on success, positive value on failure. 
*/ 2430 /****************************************************************************/ 2431 static int 2432 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, 2433 int buf_size) 2434 { 2435 u32 written, offset32, len32; 2436 u8 *buf, start[4], end[4]; 2437 int rc = 0; 2438 int align_start, align_end; 2439 2440 DBENTER(BCE_VERBOSE_NVRAM); 2441 2442 buf = data_buf; 2443 offset32 = offset; 2444 len32 = buf_size; 2445 align_start = align_end = 0; 2446 2447 if ((align_start = (offset32 & 3))) { 2448 offset32 &= ~3; 2449 len32 += align_start; 2450 if ((rc = bce_nvram_read(sc, offset32, start, 4))) 2451 goto bce_nvram_write_exit; 2452 } 2453 2454 if (len32 & 3) { 2455 if ((len32 > 4) || !align_start) { 2456 align_end = 4 - (len32 & 3); 2457 len32 += align_end; 2458 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, 2459 end, 4))) { 2460 goto bce_nvram_write_exit; 2461 } 2462 } 2463 } 2464 2465 if (align_start || align_end) { 2466 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 2467 if (buf == 0) { 2468 rc = ENOMEM; 2469 goto bce_nvram_write_exit; 2470 } 2471 2472 if (align_start) { 2473 memcpy(buf, start, 4); 2474 } 2475 2476 if (align_end) { 2477 memcpy(buf + len32 - 4, end, 4); 2478 } 2479 memcpy(buf + align_start, data_buf, buf_size); 2480 } 2481 2482 written = 0; 2483 while ((written < len32) && (rc == 0)) { 2484 u32 page_start, page_end, data_start, data_end; 2485 u32 addr, cmd_flags; 2486 int i; 2487 u8 flash_buffer[264]; 2488 2489 /* Find the page_start addr */ 2490 page_start = offset32 + written; 2491 page_start -= (page_start % sc->bce_flash_info->page_size); 2492 /* Find the page_end addr */ 2493 page_end = page_start + sc->bce_flash_info->page_size; 2494 /* Find the data_start addr */ 2495 data_start = (written == 0) ? offset32 : page_start; 2496 /* Find the data_end addr */ 2497 data_end = (page_end > offset32 + len32) ? 2498 (offset32 + len32) : page_end; 2499 2500 /* Request access to the flash interface. 
*/ 2501 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2502 goto bce_nvram_write_exit; 2503 2504 /* Enable access to flash interface */ 2505 bce_enable_nvram_access(sc); 2506 2507 cmd_flags = BCE_NVM_COMMAND_FIRST; 2508 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2509 int j; 2510 2511 /* Read the whole page into the buffer 2512 * (non-buffer flash only) */ 2513 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { 2514 if (j == (sc->bce_flash_info->page_size - 4)) { 2515 cmd_flags |= BCE_NVM_COMMAND_LAST; 2516 } 2517 rc = bce_nvram_read_dword(sc, 2518 page_start + j, 2519 &flash_buffer[j], 2520 cmd_flags); 2521 2522 if (rc) 2523 goto bce_nvram_write_locked_exit; 2524 2525 cmd_flags = 0; 2526 } 2527 } 2528 2529 /* Enable writes to flash interface (unlock write-protect) */ 2530 if ((rc = bce_enable_nvram_write(sc)) != 0) 2531 goto bce_nvram_write_locked_exit; 2532 2533 /* Erase the page */ 2534 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) 2535 goto bce_nvram_write_locked_exit; 2536 2537 /* Re-enable the write again for the actual write */ 2538 bce_enable_nvram_write(sc); 2539 2540 /* Loop to write back the buffer data from page_start to 2541 * data_start */ 2542 i = 0; 2543 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2544 for (addr = page_start; addr < data_start; 2545 addr += 4, i += 4) { 2546 2547 rc = bce_nvram_write_dword(sc, addr, 2548 &flash_buffer[i], cmd_flags); 2549 2550 if (rc != 0) 2551 goto bce_nvram_write_locked_exit; 2552 2553 cmd_flags = 0; 2554 } 2555 } 2556 2557 /* Loop to write the new data from data_start to data_end */ 2558 for (addr = data_start; addr < data_end; addr += 4, i++) { 2559 if ((addr == page_end - 4) || 2560 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) && 2561 (addr == data_end - 4))) { 2562 2563 cmd_flags |= BCE_NVM_COMMAND_LAST; 2564 } 2565 rc = bce_nvram_write_dword(sc, addr, buf, 2566 cmd_flags); 2567 2568 if (rc != 0) 2569 goto bce_nvram_write_locked_exit; 2570 2571 cmd_flags = 0; 2572 buf += 4; 
2573 } 2574 2575 /* Loop to write back the buffer data from data_end 2576 * to page_end */ 2577 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2578 for (addr = data_end; addr < page_end; 2579 addr += 4, i += 4) { 2580 2581 if (addr == page_end-4) { 2582 cmd_flags = BCE_NVM_COMMAND_LAST; 2583 } 2584 rc = bce_nvram_write_dword(sc, addr, 2585 &flash_buffer[i], cmd_flags); 2586 2587 if (rc != 0) 2588 goto bce_nvram_write_locked_exit; 2589 2590 cmd_flags = 0; 2591 } 2592 } 2593 2594 /* Disable writes to flash interface (lock write-protect) */ 2595 bce_disable_nvram_write(sc); 2596 2597 /* Disable access to flash interface */ 2598 bce_disable_nvram_access(sc); 2599 bce_release_nvram_lock(sc); 2600 2601 /* Increment written */ 2602 written += data_end - data_start; 2603 } 2604 2605 goto bce_nvram_write_exit; 2606 2607 bce_nvram_write_locked_exit: 2608 bce_disable_nvram_write(sc); 2609 bce_disable_nvram_access(sc); 2610 bce_release_nvram_lock(sc); 2611 2612 bce_nvram_write_exit: 2613 if (align_start || align_end) 2614 free(buf, M_DEVBUF); 2615 2616 DBEXIT(BCE_VERBOSE_NVRAM); 2617 return (rc); 2618 } 2619 #endif /* BCE_NVRAM_WRITE_SUPPORT */ 2620 2621 2622 /****************************************************************************/ 2623 /* Verifies that NVRAM is accessible and contains valid data. */ 2624 /* */ 2625 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 2626 /* correct. */ 2627 /* */ 2628 /* Returns: */ 2629 /* 0 on success, positive value on failure. */ 2630 /****************************************************************************/ 2631 static int 2632 bce_nvram_test(struct bce_softc *sc) 2633 { 2634 u32 buf[BCE_NVRAM_SIZE / 4]; 2635 u8 *data = (u8 *) buf; 2636 int rc = 0; 2637 u32 magic, csum; 2638 2639 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2640 2641 /* 2642 * Check that the device NVRAM is valid by reading 2643 * the magic value at offset 0. 
2644 */ 2645 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) { 2646 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", 2647 __FILE__, __LINE__); 2648 goto bce_nvram_test_exit; 2649 } 2650 2651 /* 2652 * Verify that offset 0 of the NVRAM contains 2653 * a valid magic number. 2654 */ 2655 magic = bce_be32toh(buf[0]); 2656 if (magic != BCE_NVRAM_MAGIC) { 2657 rc = ENODEV; 2658 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! " 2659 "Expected: 0x%08X, Found: 0x%08X\n", 2660 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic); 2661 goto bce_nvram_test_exit; 2662 } 2663 2664 /* 2665 * Verify that the device NVRAM includes valid 2666 * configuration data. 2667 */ 2668 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) { 2669 BCE_PRINTF("%s(%d): Unable to read manufacturing " 2670 "Information from NVRAM!\n", __FILE__, __LINE__); 2671 goto bce_nvram_test_exit; 2672 } 2673 2674 csum = ether_crc32_le(data, 0x100); 2675 if (csum != BCE_CRC32_RESIDUAL) { 2676 rc = ENODEV; 2677 BCE_PRINTF("%s(%d): Invalid manufacturing information " 2678 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 2679 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); 2680 goto bce_nvram_test_exit; 2681 } 2682 2683 csum = ether_crc32_le(data + 0x100, 0x100); 2684 if (csum != BCE_CRC32_RESIDUAL) { 2685 rc = ENODEV; 2686 BCE_PRINTF("%s(%d): Invalid feature configuration " 2687 "information NVRAM CRC! Expected: 0x%08X, " 2688 "Found: 08%08X\n", __FILE__, __LINE__, 2689 BCE_CRC32_RESIDUAL, csum); 2690 } 2691 2692 bce_nvram_test_exit: 2693 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2694 return rc; 2695 } 2696 2697 2698 /****************************************************************************/ 2699 /* Identifies the current media type of the controller and sets the PHY */ 2700 /* address. */ 2701 /* */ 2702 /* Returns: */ 2703 /* Nothing. 
*/
/****************************************************************************/
static void
bce_get_media(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_PHY);

	/* Assume PHY address for copper controllers. */
	sc->bce_phy_addr = 1;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		/* NOTE(review): this inner 'val' shadows the function-scope
		 * 'val' declared above (-Wshadow); the outer one is only
		 * used later in the SerDes branch, so behavior is correct. */
		u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
		u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		u32 strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
			    "for copper.\n");
			goto bce_get_media_exit;
		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
			    "for dual media.\n");
			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
			goto bce_get_media_exit;
		}

		/* Select the strap source: the override field or the
		 * hardware strap field of DUAL_MEDIA_CTRL. */
		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val &
			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else
			strap = (val &
			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

		/* The strap-to-media mapping differs between PCI
		 * function 0 and function 1. */
		if (pci_get_function(sc->bce_dev) == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
				break;
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
				break;
			}
		}

	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;

	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {

		/* SerDes parts in this driver do not support WOL. */
		sc->bce_flags |= BCE_NO_WOL_FLAG;

		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
			sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG;

		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			/* 5708S/09S/16S use a separate PHY for SerDes. */
			sc->bce_phy_addr = 2;

			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
				sc->bce_phy_flags |=
				    BCE_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb "
				    "capable adapter\n");
			}
		}
	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;

bce_get_media_exit:
	DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
	    "Using PHY address %d.\n", sc->bce_phy_addr);

	DBEXIT(BCE_VERBOSE_PHY);
}


/****************************************************************************/
/* Performs PHY initialization required before MII drivers access the      */
/* device.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_media(struct bce_softc *sc)
{
	if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		/*
		 * Configure 5709S/5716S PHYs to use traditional IEEE
		 * Clause 22 method.  Otherwise we have no way to attach
		 * the PHY in mii(4) layer.  PHY specific configuration
		 * is done in mii layer.
		 */

		/* Select auto-negotiation MMD of the PHY. */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

		/* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}


/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees    */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);

	/* Free, unmap, and destroy the status block. */
	if (sc->status_block != NULL) {
		bus_dmamem_free(
		    sc->status_tag,
		    sc->status_block,
		    sc->status_map);
		sc->status_block = NULL;
	}

	if (sc->status_map != NULL) {
		bus_dmamap_unload(
		    sc->status_tag,
		    sc->status_map);
		bus_dmamap_destroy(sc->status_tag,
		    sc->status_map);
		sc->status_map = NULL;
	}

	if (sc->status_tag != NULL) {
		bus_dma_tag_destroy(sc->status_tag);
		sc->status_tag = NULL;
	}


	/* Free, unmap, and destroy the statistics block. */
	if (sc->stats_block != NULL) {
		bus_dmamem_free(
		    sc->stats_tag,
		    sc->stats_block,
		    sc->stats_map);
		sc->stats_block = NULL;
	}

	if (sc->stats_map != NULL) {
		bus_dmamap_unload(
		    sc->stats_tag,
		    sc->stats_map);
		bus_dmamap_destroy(sc->stats_tag,
		    sc->stats_map);
		sc->stats_map = NULL;
	}

	if (sc->stats_tag != NULL) {
		bus_dma_tag_destroy(sc->stats_tag);
		sc->stats_tag = NULL;
	}


	/* Free, unmap and destroy all context memory pages
	 * (5709/5716 only -- earlier chips keep context in on-chip RAM). */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		for (i = 0; i < sc->ctx_pages; i++ ) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamem_free(
				    sc->ctx_tag,
				    sc->ctx_block[i],
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}

			if (sc->ctx_map[i] != NULL) {
				bus_dmamap_unload(
				    sc->ctx_tag,
				    sc->ctx_map[i]);
				bus_dmamap_destroy(
				    sc->ctx_tag,
				    sc->ctx_map[i]);
				sc->ctx_map[i] = NULL;
			}
		}

		/* Destroy the context memory tag. */
		if (sc->ctx_tag != NULL) {
			bus_dma_tag_destroy(sc->ctx_tag);
			sc->ctx_tag = NULL;
		}
	}


	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL) {
			bus_dmamem_free(
			    sc->tx_bd_chain_tag,
			    sc->tx_bd_chain[i],
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
		}

		if (sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
			    sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamap_destroy(
			    sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX buffer descriptor tag. */
	if (sc->tx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
		sc->tx_bd_chain_tag = NULL;
	}


	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL) {
			bus_dmamem_free(
			    sc->rx_bd_chain_tag,
			    sc->rx_bd_chain[i],
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
		}

		if (sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
			    sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamap_destroy(
			    sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the RX buffer descriptor tag. */
	if (sc->rx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
		sc->rx_bd_chain_tag = NULL;
	}


#ifdef BCE_JUMBO_HDRSPLIT
	/* Free, unmap and destroy all page buffer descriptor chain pages. */
	for (i = 0; i < PG_PAGES; i++ ) {
		if (sc->pg_bd_chain[i] != NULL) {
			bus_dmamem_free(
			    sc->pg_bd_chain_tag,
			    sc->pg_bd_chain[i],
			    sc->pg_bd_chain_map[i]);
			sc->pg_bd_chain[i] = NULL;
		}

		if (sc->pg_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
			    sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i]);
			bus_dmamap_destroy(
			    sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i]);
			sc->pg_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the page buffer descriptor tag. */
	if (sc->pg_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
		sc->pg_bd_chain_tag = NULL;
	}
#endif


	/* Unload and destroy the TX mbuf maps. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
			sc->tx_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the TX mbuf tag. */
	if (sc->tx_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_mbuf_tag);
		sc->tx_mbuf_tag = NULL;
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
			sc->rx_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the RX mbuf tag. */
	if (sc->rx_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->rx_mbuf_tag);
		sc->rx_mbuf_tag = NULL;
	}

#ifdef BCE_JUMBO_HDRSPLIT
	/* Unload and destroy the page mbuf maps. */
	for (i = 0; i < TOTAL_PG_BD; i++) {
		if (sc->pg_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->pg_mbuf_tag,
			    sc->pg_mbuf_map[i]);
			bus_dmamap_destroy(sc->pg_mbuf_tag,
			    sc->pg_mbuf_map[i]);
			sc->pg_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the page mbuf tag. */
	if (sc->pg_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->pg_mbuf_tag);
		sc->pg_mbuf_tag = NULL;
	}
#endif

	/* Destroy the parent tag */
	if (sc->parent_tag != NULL) {
		bus_dma_tag_destroy(sc->parent_tag);
		sc->parent_tag = NULL;
	}

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Get DMA memory from the OS.                                              */
/*                                                                          */
/* Validates that the OS has provided DMA buffers in response to a         */
/* bus_dmamap_load() call and saves the physical address of those buffers. */
/* When the callback is used the OS will return 0 for the mapping function */
/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
/* failures back to the caller.                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3092 /****************************************************************************/ 3093 static void 3094 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3095 { 3096 bus_addr_t *busaddr = arg; 3097 3098 KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", 3099 __FUNCTION__, nseg)); 3100 /* Simulate a mapping failure. */ 3101 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), 3102 error = ENOMEM); 3103 3104 /* ToDo: How to increment debug sim_count variable here? */ 3105 3106 /* Check for an error and signal the caller that an error occurred. */ 3107 if (error) { 3108 *busaddr = 0; 3109 } else { 3110 *busaddr = segs->ds_addr; 3111 } 3112 3113 return; 3114 } 3115 3116 3117 /****************************************************************************/ 3118 /* Allocate any DMA memory needed by the driver. */ 3119 /* */ 3120 /* Allocates DMA memory needed for the various global structures needed by */ 3121 /* hardware. */ 3122 /* */ 3123 /* Memory alignment requirements: */ 3124 /* +-----------------+----------+----------+----------+----------+ */ 3125 /* | | 5706 | 5708 | 5709 | 5716 | */ 3126 /* +-----------------+----------+----------+----------+----------+ */ 3127 /* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3128 /* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3129 /* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ 3130 /* |PG Buffers | none | none | none | none | */ 3131 /* |TX Buffers | none | none | none | none | */ 3132 /* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ 3133 /* |Context Memory | | | | | */ 3134 /* +-----------------+----------+----------+----------+----------+ */ 3135 /* */ 3136 /* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 3137 /* */ 3138 /* Returns: */ 3139 /* 0 for success, positive value for failure. 
*/ 3140 /****************************************************************************/ 3141 static int 3142 bce_dma_alloc(device_t dev) 3143 { 3144 struct bce_softc *sc; 3145 int i, error, rc = 0; 3146 bus_size_t max_size, max_seg_size; 3147 int max_segments; 3148 3149 sc = device_get_softc(dev); 3150 3151 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3152 3153 /* 3154 * Allocate the parent bus DMA tag appropriate for PCI. 3155 */ 3156 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY, 3157 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3158 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 3159 &sc->parent_tag)) { 3160 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", 3161 __FILE__, __LINE__); 3162 rc = ENOMEM; 3163 goto bce_dma_alloc_exit; 3164 } 3165 3166 /* 3167 * Create a DMA tag for the status block, allocate and clear the 3168 * memory, map the memory into DMA space, and fetch the physical 3169 * address of the block. 3170 */ 3171 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3172 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3173 NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 3174 0, NULL, NULL, &sc->status_tag)) { 3175 BCE_PRINTF("%s(%d): Could not allocate status block " 3176 "DMA tag!\n", __FILE__, __LINE__); 3177 rc = ENOMEM; 3178 goto bce_dma_alloc_exit; 3179 } 3180 3181 if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, 3182 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3183 &sc->status_map)) { 3184 BCE_PRINTF("%s(%d): Could not allocate status block " 3185 "DMA memory!\n", __FILE__, __LINE__); 3186 rc = ENOMEM; 3187 goto bce_dma_alloc_exit; 3188 } 3189 3190 error = bus_dmamap_load(sc->status_tag, sc->status_map, 3191 sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr, 3192 &sc->status_block_paddr, BUS_DMA_NOWAIT); 3193 3194 if (error) { 3195 BCE_PRINTF("%s(%d): Could not map status block " 3196 "DMA memory!\n", __FILE__, __LINE__); 3197 rc = ENOMEM; 3198 goto 
bce_dma_alloc_exit; 3199 } 3200 3201 DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n", 3202 __FUNCTION__, (uintmax_t) sc->status_block_paddr); 3203 3204 /* 3205 * Create a DMA tag for the statistics block, allocate and clear the 3206 * memory, map the memory into DMA space, and fetch the physical 3207 * address of the block. 3208 */ 3209 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3210 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3211 NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 3212 0, NULL, NULL, &sc->stats_tag)) { 3213 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3214 "DMA tag!\n", __FILE__, __LINE__); 3215 rc = ENOMEM; 3216 goto bce_dma_alloc_exit; 3217 } 3218 3219 if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, 3220 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { 3221 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3222 "DMA memory!\n", __FILE__, __LINE__); 3223 rc = ENOMEM; 3224 goto bce_dma_alloc_exit; 3225 } 3226 3227 error = bus_dmamap_load(sc->stats_tag, sc->stats_map, 3228 sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr, 3229 &sc->stats_block_paddr, BUS_DMA_NOWAIT); 3230 3231 if(error) { 3232 BCE_PRINTF("%s(%d): Could not map statistics block " 3233 "DMA memory!\n", __FILE__, __LINE__); 3234 rc = ENOMEM; 3235 goto bce_dma_alloc_exit; 3236 } 3237 3238 DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n", 3239 __FUNCTION__, (uintmax_t) sc->stats_block_paddr); 3240 3241 /* BCM5709 uses host memory as cache for context memory. */ 3242 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3243 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3244 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 3245 if (sc->ctx_pages == 0) 3246 sc->ctx_pages = 1; 3247 3248 DBRUNIF((sc->ctx_pages > 512), 3249 BCE_PRINTF("%s(%d): Too many CTX pages! 
%d > 512\n", 3250 __FILE__, __LINE__, sc->ctx_pages)); 3251 3252 /* 3253 * Create a DMA tag for the context pages, 3254 * allocate and clear the memory, map the 3255 * memory into DMA space, and fetch the 3256 * physical address of the block. 3257 */ 3258 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3259 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3260 NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 3261 0, NULL, NULL, &sc->ctx_tag)) { 3262 BCE_PRINTF("%s(%d): Could not allocate CTX " 3263 "DMA tag!\n", __FILE__, __LINE__); 3264 rc = ENOMEM; 3265 goto bce_dma_alloc_exit; 3266 } 3267 3268 for (i = 0; i < sc->ctx_pages; i++) { 3269 3270 if(bus_dmamem_alloc(sc->ctx_tag, 3271 (void **)&sc->ctx_block[i], 3272 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3273 &sc->ctx_map[i])) { 3274 BCE_PRINTF("%s(%d): Could not allocate CTX " 3275 "DMA memory!\n", __FILE__, __LINE__); 3276 rc = ENOMEM; 3277 goto bce_dma_alloc_exit; 3278 } 3279 3280 error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 3281 sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr, 3282 &sc->ctx_paddr[i], BUS_DMA_NOWAIT); 3283 3284 if (error) { 3285 BCE_PRINTF("%s(%d): Could not map CTX " 3286 "DMA memory!\n", __FILE__, __LINE__); 3287 rc = ENOMEM; 3288 goto bce_dma_alloc_exit; 3289 } 3290 3291 DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] " 3292 "= 0x%jX\n", __FUNCTION__, i, 3293 (uintmax_t) sc->ctx_paddr[i]); 3294 } 3295 } 3296 3297 /* 3298 * Create a DMA tag for the TX buffer descriptor chain, 3299 * allocate and clear the memory, and fetch the 3300 * physical address of the block. 
3301 */ 3302 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, 3303 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3304 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0, 3305 NULL, NULL, &sc->tx_bd_chain_tag)) { 3306 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3307 "chain DMA tag!\n", __FILE__, __LINE__); 3308 rc = ENOMEM; 3309 goto bce_dma_alloc_exit; 3310 } 3311 3312 for (i = 0; i < TX_PAGES; i++) { 3313 3314 if(bus_dmamem_alloc(sc->tx_bd_chain_tag, 3315 (void **)&sc->tx_bd_chain[i], 3316 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3317 &sc->tx_bd_chain_map[i])) { 3318 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3319 "chain DMA memory!\n", __FILE__, __LINE__); 3320 rc = ENOMEM; 3321 goto bce_dma_alloc_exit; 3322 } 3323 3324 error = bus_dmamap_load(sc->tx_bd_chain_tag, 3325 sc->tx_bd_chain_map[i], sc->tx_bd_chain[i], 3326 BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3327 &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3328 3329 if (error) { 3330 BCE_PRINTF("%s(%d): Could not map TX descriptor " 3331 "chain DMA memory!\n", __FILE__, __LINE__); 3332 rc = ENOMEM; 3333 goto bce_dma_alloc_exit; 3334 } 3335 3336 DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = " 3337 "0x%jX\n", __FUNCTION__, i, 3338 (uintmax_t) sc->tx_bd_chain_paddr[i]); 3339 } 3340 3341 /* Check the required size before mapping to conserve resources. */ 3342 if (bce_tso_enable) { 3343 max_size = BCE_TSO_MAX_SIZE; 3344 max_segments = BCE_MAX_SEGMENTS; 3345 max_seg_size = BCE_TSO_MAX_SEG_SIZE; 3346 } else { 3347 max_size = MCLBYTES * BCE_MAX_SEGMENTS; 3348 max_segments = BCE_MAX_SEGMENTS; 3349 max_seg_size = MCLBYTES; 3350 } 3351 3352 /* Create a DMA tag for TX mbufs. 
*/ 3353 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3354 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, 3355 max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) { 3356 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", 3357 __FILE__, __LINE__); 3358 rc = ENOMEM; 3359 goto bce_dma_alloc_exit; 3360 } 3361 3362 /* Create DMA maps for the TX mbufs clusters. */ 3363 for (i = 0; i < TOTAL_TX_BD; i++) { 3364 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 3365 &sc->tx_mbuf_map[i])) { 3366 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA " 3367 "map!\n", __FILE__, __LINE__); 3368 rc = ENOMEM; 3369 goto bce_dma_alloc_exit; 3370 } 3371 } 3372 3373 /* 3374 * Create a DMA tag for the RX buffer descriptor chain, 3375 * allocate and clear the memory, and fetch the physical 3376 * address of the blocks. 3377 */ 3378 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3379 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, 3380 sc->max_bus_addr, NULL, NULL, 3381 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 3382 0, NULL, NULL, &sc->rx_bd_chain_tag)) { 3383 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " 3384 "DMA tag!\n", __FILE__, __LINE__); 3385 rc = ENOMEM; 3386 goto bce_dma_alloc_exit; 3387 } 3388 3389 for (i = 0; i < RX_PAGES; i++) { 3390 3391 if (bus_dmamem_alloc(sc->rx_bd_chain_tag, 3392 (void **)&sc->rx_bd_chain[i], 3393 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3394 &sc->rx_bd_chain_map[i])) { 3395 BCE_PRINTF("%s(%d): Could not allocate RX descriptor " 3396 "chain DMA memory!\n", __FILE__, __LINE__); 3397 rc = ENOMEM; 3398 goto bce_dma_alloc_exit; 3399 } 3400 3401 error = bus_dmamap_load(sc->rx_bd_chain_tag, 3402 sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], 3403 BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3404 &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3405 3406 if (error) { 3407 BCE_PRINTF("%s(%d): Could not map RX descriptor " 3408 "chain DMA memory!\n", __FILE__, __LINE__); 3409 rc = ENOMEM; 3410 goto 
bce_dma_alloc_exit; 3411 } 3412 3413 DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = " 3414 "0x%jX\n", __FUNCTION__, i, 3415 (uintmax_t) sc->rx_bd_chain_paddr[i]); 3416 } 3417 3418 /* 3419 * Create a DMA tag for RX mbufs. 3420 */ 3421 #ifdef BCE_JUMBO_HDRSPLIT 3422 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? 3423 MCLBYTES : sc->rx_bd_mbuf_alloc_size); 3424 #else 3425 max_size = max_seg_size = MJUM9BYTES; 3426 #endif 3427 max_segments = 1; 3428 3429 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag " 3430 "(max size = 0x%jX max segments = %d, max segment " 3431 "size = 0x%jX)\n", __FUNCTION__, (uintmax_t) max_size, 3432 max_segments, (uintmax_t) max_seg_size); 3433 3434 if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN, 3435 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3436 max_size, max_segments, max_seg_size, 0, NULL, NULL, 3437 &sc->rx_mbuf_tag)) { 3438 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", 3439 __FILE__, __LINE__); 3440 rc = ENOMEM; 3441 goto bce_dma_alloc_exit; 3442 } 3443 3444 /* Create DMA maps for the RX mbuf clusters. */ 3445 for (i = 0; i < TOTAL_RX_BD; i++) { 3446 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, 3447 &sc->rx_mbuf_map[i])) { 3448 BCE_PRINTF("%s(%d): Unable to create RX mbuf " 3449 "DMA map!\n", __FILE__, __LINE__); 3450 rc = ENOMEM; 3451 goto bce_dma_alloc_exit; 3452 } 3453 } 3454 3455 #ifdef BCE_JUMBO_HDRSPLIT 3456 /* 3457 * Create a DMA tag for the page buffer descriptor chain, 3458 * allocate and clear the memory, and fetch the physical 3459 * address of the blocks. 
3460 */ 3461 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3462 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, 3463 NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ, 3464 0, NULL, NULL, &sc->pg_bd_chain_tag)) { 3465 BCE_PRINTF("%s(%d): Could not allocate page descriptor " 3466 "chain DMA tag!\n", __FILE__, __LINE__); 3467 rc = ENOMEM; 3468 goto bce_dma_alloc_exit; 3469 } 3470 3471 for (i = 0; i < PG_PAGES; i++) { 3472 3473 if (bus_dmamem_alloc(sc->pg_bd_chain_tag, 3474 (void **)&sc->pg_bd_chain[i], 3475 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3476 &sc->pg_bd_chain_map[i])) { 3477 BCE_PRINTF("%s(%d): Could not allocate page " 3478 "descriptor chain DMA memory!\n", 3479 __FILE__, __LINE__); 3480 rc = ENOMEM; 3481 goto bce_dma_alloc_exit; 3482 } 3483 3484 error = bus_dmamap_load(sc->pg_bd_chain_tag, 3485 sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], 3486 BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr, 3487 &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3488 3489 if (error) { 3490 BCE_PRINTF("%s(%d): Could not map page descriptor " 3491 "chain DMA memory!\n", __FILE__, __LINE__); 3492 rc = ENOMEM; 3493 goto bce_dma_alloc_exit; 3494 } 3495 3496 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = " 3497 "0x%jX\n", __FUNCTION__, i, 3498 (uintmax_t) sc->pg_bd_chain_paddr[i]); 3499 } 3500 3501 /* 3502 * Create a DMA tag for page mbufs. 3503 */ 3504 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ? 3505 MCLBYTES : sc->pg_bd_mbuf_alloc_size); 3506 3507 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3508 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3509 max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) { 3510 BCE_PRINTF("%s(%d): Could not allocate page mbuf " 3511 "DMA tag!\n", __FILE__, __LINE__); 3512 rc = ENOMEM; 3513 goto bce_dma_alloc_exit; 3514 } 3515 3516 /* Create DMA maps for the page mbuf clusters. 
*/ 3517 for (i = 0; i < TOTAL_PG_BD; i++) { 3518 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, 3519 &sc->pg_mbuf_map[i])) { 3520 BCE_PRINTF("%s(%d): Unable to create page mbuf " 3521 "DMA map!\n", __FILE__, __LINE__); 3522 rc = ENOMEM; 3523 goto bce_dma_alloc_exit; 3524 } 3525 } 3526 #endif 3527 3528 bce_dma_alloc_exit: 3529 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3530 return(rc); 3531 } 3532 3533 3534 /****************************************************************************/ 3535 /* Release all resources used by the driver. */ 3536 /* */ 3537 /* Releases all resources acquired by the driver including interrupts, */ 3538 /* interrupt handler, interfaces, mutexes, and DMA memory. */ 3539 /* */ 3540 /* Returns: */ 3541 /* Nothing. */ 3542 /****************************************************************************/ 3543 static void 3544 bce_release_resources(struct bce_softc *sc) 3545 { 3546 device_t dev; 3547 3548 DBENTER(BCE_VERBOSE_RESET); 3549 3550 dev = sc->bce_dev; 3551 3552 bce_dma_free(sc); 3553 3554 if (sc->bce_intrhand != NULL) { 3555 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); 3556 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 3557 } 3558 3559 if (sc->bce_res_irq != NULL) { 3560 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); 3561 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid, 3562 sc->bce_res_irq); 3563 } 3564 3565 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) { 3566 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n"); 3567 pci_release_msi(dev); 3568 } 3569 3570 if (sc->bce_res_mem != NULL) { 3571 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); 3572 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 3573 sc->bce_res_mem); 3574 } 3575 3576 if (sc->bce_ifp != NULL) { 3577 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); 3578 if_free(sc->bce_ifp); 3579 } 3580 3581 if (mtx_initialized(&sc->bce_mtx)) 3582 BCE_LOCK_DESTROY(sc); 3583 3584 DBEXIT(BCE_VERBOSE_RESET); 
3585 } 3586 3587 3588 /****************************************************************************/ 3589 /* Firmware synchronization. */ 3590 /* */ 3591 /* Before performing certain events such as a chip reset, synchronize with */ 3592 /* the firmware first. */ 3593 /* */ 3594 /* Returns: */ 3595 /* 0 for success, positive value for failure. */ 3596 /****************************************************************************/ 3597 static int 3598 bce_fw_sync(struct bce_softc *sc, u32 msg_data) 3599 { 3600 int i, rc = 0; 3601 u32 val; 3602 3603 DBENTER(BCE_VERBOSE_RESET); 3604 3605 /* Don't waste any time if we've timed out before. */ 3606 if (sc->bce_fw_timed_out == TRUE) { 3607 rc = EBUSY; 3608 goto bce_fw_sync_exit; 3609 } 3610 3611 /* Increment the message sequence number. */ 3612 sc->bce_fw_wr_seq++; 3613 msg_data |= sc->bce_fw_wr_seq; 3614 3615 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = " 3616 "0x%08X\n", msg_data); 3617 3618 /* Send the message to the bootcode driver mailbox. */ 3619 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3620 3621 /* Wait for the bootcode to acknowledge the message. */ 3622 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 3623 /* Check for a response in the bootcode firmware mailbox. */ 3624 val = bce_shmem_rd(sc, BCE_FW_MB); 3625 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 3626 break; 3627 DELAY(1000); 3628 } 3629 3630 /* If we've timed out, tell bootcode that we've stopped waiting. */ 3631 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && 3632 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { 3633 3634 BCE_PRINTF("%s(%d): Firmware synchronization timeout! 
" 3635 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 3636 3637 msg_data &= ~BCE_DRV_MSG_CODE; 3638 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 3639 3640 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3641 3642 sc->bce_fw_timed_out = TRUE; 3643 rc = EBUSY; 3644 } 3645 3646 bce_fw_sync_exit: 3647 DBEXIT(BCE_VERBOSE_RESET); 3648 return (rc); 3649 } 3650 3651 3652 /****************************************************************************/ 3653 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 3654 /* */ 3655 /* Returns: */ 3656 /* Nothing. */ 3657 /****************************************************************************/ 3658 static void 3659 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code, 3660 u32 rv2p_code_len, u32 rv2p_proc) 3661 { 3662 int i; 3663 u32 val; 3664 3665 DBENTER(BCE_VERBOSE_RESET); 3666 3667 /* Set the page size used by RV2P. */ 3668 if (rv2p_proc == RV2P_PROC2) { 3669 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); 3670 } 3671 3672 for (i = 0; i < rv2p_code_len; i += 8) { 3673 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 3674 rv2p_code++; 3675 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 3676 rv2p_code++; 3677 3678 if (rv2p_proc == RV2P_PROC1) { 3679 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 3680 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 3681 } 3682 else { 3683 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 3684 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 3685 } 3686 } 3687 3688 /* Reset the processor, un-stall is done later. */ 3689 if (rv2p_proc == RV2P_PROC1) { 3690 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 3691 } 3692 else { 3693 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 3694 } 3695 3696 DBEXIT(BCE_VERBOSE_RESET); 3697 } 3698 3699 3700 /****************************************************************************/ 3701 /* Load RISC processor firmware. 
*/ 3702 /* */ 3703 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 3704 /* associated with a particular processor. */ 3705 /* */ 3706 /* Returns: */ 3707 /* Nothing. */ 3708 /****************************************************************************/ 3709 static void 3710 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 3711 struct fw_info *fw) 3712 { 3713 u32 offset; 3714 3715 DBENTER(BCE_VERBOSE_RESET); 3716 3717 bce_halt_cpu(sc, cpu_reg); 3718 3719 /* Load the Text area. */ 3720 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 3721 if (fw->text) { 3722 int j; 3723 3724 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { 3725 REG_WR_IND(sc, offset, fw->text[j]); 3726 } 3727 } 3728 3729 /* Load the Data area. */ 3730 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 3731 if (fw->data) { 3732 int j; 3733 3734 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { 3735 REG_WR_IND(sc, offset, fw->data[j]); 3736 } 3737 } 3738 3739 /* Load the SBSS area. */ 3740 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 3741 if (fw->sbss) { 3742 int j; 3743 3744 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { 3745 REG_WR_IND(sc, offset, fw->sbss[j]); 3746 } 3747 } 3748 3749 /* Load the BSS area. */ 3750 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 3751 if (fw->bss) { 3752 int j; 3753 3754 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { 3755 REG_WR_IND(sc, offset, fw->bss[j]); 3756 } 3757 } 3758 3759 /* Load the Read-Only area. */ 3760 offset = cpu_reg->spad_base + 3761 (fw->rodata_addr - cpu_reg->mips_view_base); 3762 if (fw->rodata) { 3763 int j; 3764 3765 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { 3766 REG_WR_IND(sc, offset, fw->rodata[j]); 3767 } 3768 } 3769 3770 /* Clear the pre-fetch instruction and set the FW start address. 
*/ 3771 REG_WR_IND(sc, cpu_reg->inst, 0); 3772 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 3773 3774 DBEXIT(BCE_VERBOSE_RESET); 3775 } 3776 3777 3778 /****************************************************************************/ 3779 /* Starts the RISC processor. */ 3780 /* */ 3781 /* Assumes the CPU starting address has already been set. */ 3782 /* */ 3783 /* Returns: */ 3784 /* Nothing. */ 3785 /****************************************************************************/ 3786 static void 3787 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 3788 { 3789 u32 val; 3790 3791 DBENTER(BCE_VERBOSE_RESET); 3792 3793 /* Start the CPU. */ 3794 val = REG_RD_IND(sc, cpu_reg->mode); 3795 val &= ~cpu_reg->mode_value_halt; 3796 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3797 REG_WR_IND(sc, cpu_reg->mode, val); 3798 3799 DBEXIT(BCE_VERBOSE_RESET); 3800 } 3801 3802 3803 /****************************************************************************/ 3804 /* Halts the RISC processor. */ 3805 /* */ 3806 /* Returns: */ 3807 /* Nothing. */ 3808 /****************************************************************************/ 3809 static void 3810 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 3811 { 3812 u32 val; 3813 3814 DBENTER(BCE_VERBOSE_RESET); 3815 3816 /* Halt the CPU. */ 3817 val = REG_RD_IND(sc, cpu_reg->mode); 3818 val |= cpu_reg->mode_value_halt; 3819 REG_WR_IND(sc, cpu_reg->mode, val); 3820 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3821 3822 DBEXIT(BCE_VERBOSE_RESET); 3823 } 3824 3825 3826 /****************************************************************************/ 3827 /* Initialize the RX CPU. */ 3828 /* */ 3829 /* Returns: */ 3830 /* Nothing. 
*/ 3831 /****************************************************************************/ 3832 static void 3833 bce_start_rxp_cpu(struct bce_softc *sc) 3834 { 3835 struct cpu_reg cpu_reg; 3836 3837 DBENTER(BCE_VERBOSE_RESET); 3838 3839 cpu_reg.mode = BCE_RXP_CPU_MODE; 3840 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 3841 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 3842 cpu_reg.state = BCE_RXP_CPU_STATE; 3843 cpu_reg.state_value_clear = 0xffffff; 3844 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 3845 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 3846 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 3847 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 3848 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 3849 cpu_reg.spad_base = BCE_RXP_SCRATCH; 3850 cpu_reg.mips_view_base = 0x8000000; 3851 3852 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n"); 3853 bce_start_cpu(sc, &cpu_reg); 3854 3855 DBEXIT(BCE_VERBOSE_RESET); 3856 } 3857 3858 3859 /****************************************************************************/ 3860 /* Initialize the RX CPU. */ 3861 /* */ 3862 /* Returns: */ 3863 /* Nothing. 
*/ 3864 /****************************************************************************/ 3865 static void 3866 bce_init_rxp_cpu(struct bce_softc *sc) 3867 { 3868 struct cpu_reg cpu_reg; 3869 struct fw_info fw; 3870 3871 DBENTER(BCE_VERBOSE_RESET); 3872 3873 cpu_reg.mode = BCE_RXP_CPU_MODE; 3874 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 3875 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 3876 cpu_reg.state = BCE_RXP_CPU_STATE; 3877 cpu_reg.state_value_clear = 0xffffff; 3878 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 3879 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 3880 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 3881 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 3882 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 3883 cpu_reg.spad_base = BCE_RXP_SCRATCH; 3884 cpu_reg.mips_view_base = 0x8000000; 3885 3886 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3887 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3888 fw.ver_major = bce_RXP_b09FwReleaseMajor; 3889 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 3890 fw.ver_fix = bce_RXP_b09FwReleaseFix; 3891 fw.start_addr = bce_RXP_b09FwStartAddr; 3892 3893 fw.text_addr = bce_RXP_b09FwTextAddr; 3894 fw.text_len = bce_RXP_b09FwTextLen; 3895 fw.text_index = 0; 3896 fw.text = bce_RXP_b09FwText; 3897 3898 fw.data_addr = bce_RXP_b09FwDataAddr; 3899 fw.data_len = bce_RXP_b09FwDataLen; 3900 fw.data_index = 0; 3901 fw.data = bce_RXP_b09FwData; 3902 3903 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 3904 fw.sbss_len = bce_RXP_b09FwSbssLen; 3905 fw.sbss_index = 0; 3906 fw.sbss = bce_RXP_b09FwSbss; 3907 3908 fw.bss_addr = bce_RXP_b09FwBssAddr; 3909 fw.bss_len = bce_RXP_b09FwBssLen; 3910 fw.bss_index = 0; 3911 fw.bss = bce_RXP_b09FwBss; 3912 3913 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 3914 fw.rodata_len = bce_RXP_b09FwRodataLen; 3915 fw.rodata_index = 0; 3916 fw.rodata = bce_RXP_b09FwRodata; 3917 } else { 3918 fw.ver_major = bce_RXP_b06FwReleaseMajor; 3919 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 3920 fw.ver_fix = bce_RXP_b06FwReleaseFix; 3921 fw.start_addr = 
bce_RXP_b06FwStartAddr; 3922 3923 fw.text_addr = bce_RXP_b06FwTextAddr; 3924 fw.text_len = bce_RXP_b06FwTextLen; 3925 fw.text_index = 0; 3926 fw.text = bce_RXP_b06FwText; 3927 3928 fw.data_addr = bce_RXP_b06FwDataAddr; 3929 fw.data_len = bce_RXP_b06FwDataLen; 3930 fw.data_index = 0; 3931 fw.data = bce_RXP_b06FwData; 3932 3933 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 3934 fw.sbss_len = bce_RXP_b06FwSbssLen; 3935 fw.sbss_index = 0; 3936 fw.sbss = bce_RXP_b06FwSbss; 3937 3938 fw.bss_addr = bce_RXP_b06FwBssAddr; 3939 fw.bss_len = bce_RXP_b06FwBssLen; 3940 fw.bss_index = 0; 3941 fw.bss = bce_RXP_b06FwBss; 3942 3943 fw.rodata_addr = bce_RXP_b06FwRodataAddr; 3944 fw.rodata_len = bce_RXP_b06FwRodataLen; 3945 fw.rodata_index = 0; 3946 fw.rodata = bce_RXP_b06FwRodata; 3947 } 3948 3949 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); 3950 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3951 3952 /* Delay RXP start until initialization is complete. */ 3953 3954 DBEXIT(BCE_VERBOSE_RESET); 3955 } 3956 3957 3958 /****************************************************************************/ 3959 /* Initialize the TX CPU. */ 3960 /* */ 3961 /* Returns: */ 3962 /* Nothing. 
*/ 3963 /****************************************************************************/ 3964 static void 3965 bce_init_txp_cpu(struct bce_softc *sc) 3966 { 3967 struct cpu_reg cpu_reg; 3968 struct fw_info fw; 3969 3970 DBENTER(BCE_VERBOSE_RESET); 3971 3972 cpu_reg.mode = BCE_TXP_CPU_MODE; 3973 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 3974 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 3975 cpu_reg.state = BCE_TXP_CPU_STATE; 3976 cpu_reg.state_value_clear = 0xffffff; 3977 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 3978 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 3979 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 3980 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 3981 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 3982 cpu_reg.spad_base = BCE_TXP_SCRATCH; 3983 cpu_reg.mips_view_base = 0x8000000; 3984 3985 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3986 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3987 fw.ver_major = bce_TXP_b09FwReleaseMajor; 3988 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 3989 fw.ver_fix = bce_TXP_b09FwReleaseFix; 3990 fw.start_addr = bce_TXP_b09FwStartAddr; 3991 3992 fw.text_addr = bce_TXP_b09FwTextAddr; 3993 fw.text_len = bce_TXP_b09FwTextLen; 3994 fw.text_index = 0; 3995 fw.text = bce_TXP_b09FwText; 3996 3997 fw.data_addr = bce_TXP_b09FwDataAddr; 3998 fw.data_len = bce_TXP_b09FwDataLen; 3999 fw.data_index = 0; 4000 fw.data = bce_TXP_b09FwData; 4001 4002 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 4003 fw.sbss_len = bce_TXP_b09FwSbssLen; 4004 fw.sbss_index = 0; 4005 fw.sbss = bce_TXP_b09FwSbss; 4006 4007 fw.bss_addr = bce_TXP_b09FwBssAddr; 4008 fw.bss_len = bce_TXP_b09FwBssLen; 4009 fw.bss_index = 0; 4010 fw.bss = bce_TXP_b09FwBss; 4011 4012 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 4013 fw.rodata_len = bce_TXP_b09FwRodataLen; 4014 fw.rodata_index = 0; 4015 fw.rodata = bce_TXP_b09FwRodata; 4016 } else { 4017 fw.ver_major = bce_TXP_b06FwReleaseMajor; 4018 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 4019 fw.ver_fix = bce_TXP_b06FwReleaseFix; 4020 fw.start_addr = 
bce_TXP_b06FwStartAddr; 4021 4022 fw.text_addr = bce_TXP_b06FwTextAddr; 4023 fw.text_len = bce_TXP_b06FwTextLen; 4024 fw.text_index = 0; 4025 fw.text = bce_TXP_b06FwText; 4026 4027 fw.data_addr = bce_TXP_b06FwDataAddr; 4028 fw.data_len = bce_TXP_b06FwDataLen; 4029 fw.data_index = 0; 4030 fw.data = bce_TXP_b06FwData; 4031 4032 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 4033 fw.sbss_len = bce_TXP_b06FwSbssLen; 4034 fw.sbss_index = 0; 4035 fw.sbss = bce_TXP_b06FwSbss; 4036 4037 fw.bss_addr = bce_TXP_b06FwBssAddr; 4038 fw.bss_len = bce_TXP_b06FwBssLen; 4039 fw.bss_index = 0; 4040 fw.bss = bce_TXP_b06FwBss; 4041 4042 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 4043 fw.rodata_len = bce_TXP_b06FwRodataLen; 4044 fw.rodata_index = 0; 4045 fw.rodata = bce_TXP_b06FwRodata; 4046 } 4047 4048 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); 4049 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4050 bce_start_cpu(sc, &cpu_reg); 4051 4052 DBEXIT(BCE_VERBOSE_RESET); 4053 } 4054 4055 4056 /****************************************************************************/ 4057 /* Initialize the TPAT CPU. */ 4058 /* */ 4059 /* Returns: */ 4060 /* Nothing. 
*/
/****************************************************************************/
static void
bce_init_tpat_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    DBENTER(BCE_VERBOSE_RESET);

    /* Register addresses the generic CPU loader uses to drive the TPAT. */
    cpu_reg.mode = BCE_TPAT_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_TPAT_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
    cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_TPAT_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* 5709/5716 controllers use the b09 firmware image; others use b06. */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        fw.ver_major = bce_TPAT_b09FwReleaseMajor;
        fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
        fw.ver_fix = bce_TPAT_b09FwReleaseFix;
        fw.start_addr = bce_TPAT_b09FwStartAddr;

        fw.text_addr = bce_TPAT_b09FwTextAddr;
        fw.text_len = bce_TPAT_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_TPAT_b09FwText;

        fw.data_addr = bce_TPAT_b09FwDataAddr;
        fw.data_len = bce_TPAT_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_TPAT_b09FwData;

        fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
        fw.sbss_len = bce_TPAT_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_TPAT_b09FwSbss;

        fw.bss_addr = bce_TPAT_b09FwBssAddr;
        fw.bss_len = bce_TPAT_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_TPAT_b09FwBss;

        fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
        fw.rodata_len = bce_TPAT_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_TPAT_b09FwRodata;
    } else {
        fw.ver_major = bce_TPAT_b06FwReleaseMajor;
        fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
        fw.ver_fix = bce_TPAT_b06FwReleaseFix;
        fw.start_addr = bce_TPAT_b06FwStartAddr;

        fw.text_addr = bce_TPAT_b06FwTextAddr;
        fw.text_len = bce_TPAT_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_TPAT_b06FwText;

        fw.data_addr = bce_TPAT_b06FwDataAddr;
        fw.data_len = bce_TPAT_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_TPAT_b06FwData;

        fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
        fw.sbss_len = bce_TPAT_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_TPAT_b06FwSbss;

        fw.bss_addr = bce_TPAT_b06FwBssAddr;
        fw.bss_len = bce_TPAT_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_TPAT_b06FwBss;

        fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
        fw.rodata_len = bce_TPAT_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_TPAT_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    bce_start_cpu(sc, &cpu_reg);

    DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the CP CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    DBENTER(BCE_VERBOSE_RESET);

    /* Register addresses the generic CPU loader uses to drive the CP. */
    cpu_reg.mode = BCE_CP_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_CP_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
    cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_CP_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* 5709/5716 controllers use the b09 firmware image; others use b06. */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        fw.ver_major = bce_CP_b09FwReleaseMajor;
        fw.ver_minor = bce_CP_b09FwReleaseMinor;
        fw.ver_fix = bce_CP_b09FwReleaseFix;
        fw.start_addr = bce_CP_b09FwStartAddr;

        fw.text_addr = bce_CP_b09FwTextAddr;
        fw.text_len = bce_CP_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_CP_b09FwText;

        fw.data_addr = bce_CP_b09FwDataAddr;
        fw.data_len = bce_CP_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_CP_b09FwData;

        fw.sbss_addr = bce_CP_b09FwSbssAddr;
        fw.sbss_len = bce_CP_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_CP_b09FwSbss;

        fw.bss_addr = bce_CP_b09FwBssAddr;
        fw.bss_len = bce_CP_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_CP_b09FwBss;

        fw.rodata_addr = bce_CP_b09FwRodataAddr;
        fw.rodata_len = bce_CP_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_CP_b09FwRodata;
    } else {
        fw.ver_major = bce_CP_b06FwReleaseMajor;
        fw.ver_minor = bce_CP_b06FwReleaseMinor;
        fw.ver_fix = bce_CP_b06FwReleaseFix;
        fw.start_addr = bce_CP_b06FwStartAddr;

        fw.text_addr = bce_CP_b06FwTextAddr;
        fw.text_len = bce_CP_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_CP_b06FwText;

        fw.data_addr = bce_CP_b06FwDataAddr;
        fw.data_len = bce_CP_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_CP_b06FwData;

        fw.sbss_addr = bce_CP_b06FwSbssAddr;
        fw.sbss_len = bce_CP_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_CP_b06FwSbss;

        fw.bss_addr = bce_CP_b06FwBssAddr;
        fw.bss_len = bce_CP_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_CP_b06FwBss;

        fw.rodata_addr = bce_CP_b06FwRodataAddr;
        fw.rodata_len = bce_CP_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_CP_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    bce_start_cpu(sc, &cpu_reg);

    DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the COM CPU.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    DBENTER(BCE_VERBOSE_RESET);

    /* Register addresses the generic CPU loader uses to drive the COM. */
    cpu_reg.mode = BCE_COM_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_COM_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
    cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_COM_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* 5709/5716 controllers use the b09 firmware image; others use b06. */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        fw.ver_major = bce_COM_b09FwReleaseMajor;
        fw.ver_minor = bce_COM_b09FwReleaseMinor;
        fw.ver_fix = bce_COM_b09FwReleaseFix;
        fw.start_addr = bce_COM_b09FwStartAddr;

        fw.text_addr = bce_COM_b09FwTextAddr;
        fw.text_len = bce_COM_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_COM_b09FwText;

        fw.data_addr = bce_COM_b09FwDataAddr;
        fw.data_len = bce_COM_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_COM_b09FwData;

        fw.sbss_addr = bce_COM_b09FwSbssAddr;
        fw.sbss_len = bce_COM_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_COM_b09FwSbss;

        fw.bss_addr = bce_COM_b09FwBssAddr;
        fw.bss_len = bce_COM_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_COM_b09FwBss;

        fw.rodata_addr = bce_COM_b09FwRodataAddr;
        fw.rodata_len = bce_COM_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_COM_b09FwRodata;
    } else {
        fw.ver_major = bce_COM_b06FwReleaseMajor;
        fw.ver_minor = bce_COM_b06FwReleaseMinor;
        fw.ver_fix = bce_COM_b06FwReleaseFix;
        fw.start_addr = bce_COM_b06FwStartAddr;

        fw.text_addr = bce_COM_b06FwTextAddr;
        fw.text_len = bce_COM_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_COM_b06FwText;

        fw.data_addr = bce_COM_b06FwDataAddr;
        fw.data_len = bce_COM_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_COM_b06FwData;

        fw.sbss_addr = bce_COM_b06FwSbssAddr;
        fw.sbss_len = bce_COM_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_COM_b06FwSbss;

        fw.bss_addr = bce_COM_b06FwBssAddr;
        fw.bss_len = bce_COM_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_COM_b06FwBss;

        fw.rodata_addr = bce_COM_b06FwRodataAddr;
        fw.rodata_len = bce_COM_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_COM_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    bce_start_cpu(sc, &cpu_reg);

    DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
    DBENTER(BCE_VERBOSE_RESET);

    /*
     * The RV2P processors take a raw firmware blob rather than a
     * struct fw_info; pick the image matching the controller family
     * (and, on 5709/5716, the chip revision).
     */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {

        if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) {
            bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
                sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
            bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
                sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
        } else {
            bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
                sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
            bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
                sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
        }

    } else {
        bce_load_rv2p_fw(sc, bce_rv2p_proc1,
            sizeof(bce_rv2p_proc1), RV2P_PROC1);
        bce_load_rv2p_fw(sc, bce_rv2p_proc2,
            sizeof(bce_rv2p_proc2), RV2P_PROC2);
    }

    /* Load and start each of the remaining on-chip processors. */
    bce_init_rxp_cpu(sc);
    bce_init_txp_cpu(sc);
    bce_init_tpat_cpu(sc);
    bce_init_com_cpu(sc);
    bce_init_cp_cpu(sc);

    DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
    u32 offset, val, vcid_addr;
    int i, j, rc, retry_cnt;

    rc = 0;
    DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);

    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        retry_cnt = CTX_INIT_RETRY_COUNT;

        DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");

        /*
         * BCM5709 context memory may be cached
         * in host memory so prepare the host memory
         * for access.
         */
        val = BCE_CTX_COMMAND_ENABLED |
            BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(sc, BCE_CTX_COMMAND, val);

        /* Wait for mem init command to complete. */
        for (i = 0; i < retry_cnt; i++) {
            val = REG_RD(sc, BCE_CTX_COMMAND);
            if (!(val & BCE_CTX_COMMAND_MEM_INIT))
                break;
            DELAY(2);
        }
        /* MEM_INIT still set after all retries means the init hung. */
        if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) {
            BCE_PRINTF("%s(): Context memory initialization failed!\n",
                __FUNCTION__);
            rc = EBUSY;
            goto init_ctx_fail;
        }

        for (i = 0; i < sc->ctx_pages; i++) {
            /* Set the physical address of the context memory. */
            REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
                BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
                BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
            REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
                BCE_ADDR_HI(sc->ctx_paddr[i]));
            REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
                BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

            /* Verify the context memory write was successful. */
            for (j = 0; j < retry_cnt; j++) {
                val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
                if ((val &
                    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
                    break;
                DELAY(5);
            }
            if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) {
                BCE_PRINTF("%s(): Failed to initialize "
                    "context page %d!\n", __FUNCTION__, i);
                rc = EBUSY;
                goto init_ctx_fail;
            }
        }
    } else {

        DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");

        /*
         * For the 5706/5708, context memory is local to
         * the controller, so initialize the controller
         * context memory.
4475 */ 4476 4477 vcid_addr = GET_CID_ADDR(96); 4478 while (vcid_addr) { 4479 4480 vcid_addr -= PHY_CTX_SIZE; 4481 4482 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 4483 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4484 4485 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) { 4486 CTX_WR(sc, 0x00, offset, 0); 4487 } 4488 4489 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 4490 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4491 } 4492 4493 } 4494 init_ctx_fail: 4495 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4496 return (rc); 4497 } 4498 4499 4500 /****************************************************************************/ 4501 /* Fetch the permanent MAC address of the controller. */ 4502 /* */ 4503 /* Returns: */ 4504 /* Nothing. */ 4505 /****************************************************************************/ 4506 static void 4507 bce_get_mac_addr(struct bce_softc *sc) 4508 { 4509 u32 mac_lo = 0, mac_hi = 0; 4510 4511 DBENTER(BCE_VERBOSE_RESET); 4512 4513 /* 4514 * The NetXtreme II bootcode populates various NIC 4515 * power-on and runtime configuration items in a 4516 * shared memory area. The factory configured MAC 4517 * address is available from both NVRAM and the 4518 * shared memory area so we'll read the value from 4519 * shared memory for speed. 
4520 */ 4521 4522 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 4523 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 4524 4525 if ((mac_lo == 0) && (mac_hi == 0)) { 4526 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", 4527 __FILE__, __LINE__); 4528 } else { 4529 sc->eaddr[0] = (u_char)(mac_hi >> 8); 4530 sc->eaddr[1] = (u_char)(mac_hi >> 0); 4531 sc->eaddr[2] = (u_char)(mac_lo >> 24); 4532 sc->eaddr[3] = (u_char)(mac_lo >> 16); 4533 sc->eaddr[4] = (u_char)(mac_lo >> 8); 4534 sc->eaddr[5] = (u_char)(mac_lo >> 0); 4535 } 4536 4537 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet " 4538 "address = %6D\n", sc->eaddr, ":"); 4539 DBEXIT(BCE_VERBOSE_RESET); 4540 } 4541 4542 4543 /****************************************************************************/ 4544 /* Program the MAC address. */ 4545 /* */ 4546 /* Returns: */ 4547 /* Nothing. */ 4548 /****************************************************************************/ 4549 static void 4550 bce_set_mac_addr(struct bce_softc *sc) 4551 { 4552 u32 val; 4553 u8 *mac_addr = sc->eaddr; 4554 4555 /* ToDo: Add support for setting multiple MAC addresses. */ 4556 4557 DBENTER(BCE_VERBOSE_RESET); 4558 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = " 4559 "%6D\n", sc->eaddr, ":"); 4560 4561 val = (mac_addr[0] << 8) | mac_addr[1]; 4562 4563 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 4564 4565 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4566 (mac_addr[4] << 8) | mac_addr[5]; 4567 4568 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 4569 4570 DBEXIT(BCE_VERBOSE_RESET); 4571 } 4572 4573 4574 /****************************************************************************/ 4575 /* Stop the controller. */ 4576 /* */ 4577 /* Returns: */ 4578 /* Nothing. 
*/
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
    struct ifnet *ifp;

    DBENTER(BCE_VERBOSE_RESET);

    /* Caller must hold the softc lock. */
    BCE_LOCK_ASSERT(sc);

    ifp = sc->bce_ifp;

    callout_stop(&sc->bce_tick_callout);

    /* Disable the transmit/receive blocks. */
    REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
    REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
    DELAY(20);

    bce_disable_intr(sc);

    /* Free RX buffers. */
#ifdef BCE_JUMBO_HDRSPLIT
    bce_free_pg_chain(sc);
#endif
    bce_free_rx_chain(sc);

    /* Free TX buffers. */
    bce_free_tx_chain(sc);

    sc->watchdog_timer = 0;

    sc->bce_link_up = FALSE;

    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    DBEXIT(BCE_VERBOSE_RESET);
}


/* Reset the chip using the supplied firmware reset code. */
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
    u32 val;
    int i, rc = 0;

    DBENTER(BCE_VERBOSE_RESET);

    DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
        __FUNCTION__, reset_code);

    /* Wait for pending PCI transactions to complete. */
    REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
        BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
        BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
        BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
        BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
    val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
    DELAY(5);

    /* Disable DMA */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
        val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
        REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
    }

    /* Assume bootcode is running. */
    sc->bce_fw_timed_out = FALSE;
    sc->bce_drv_cardiac_arrest = FALSE;

    /* Give the firmware a chance to prepare for the reset. */
    rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
    if (rc)
        goto bce_reset_exit;

    /* Set a firmware reminder that this is a soft reset. */
    bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);

    /* Dummy read to force the chip to complete all current transactions. */
    val = REG_RD(sc, BCE_MISC_ID);

    /* Chip reset. */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        /* 5709/5716 reset via the MISC command register. */
        REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
        REG_RD(sc, BCE_MISC_COMMAND);
        DELAY(5);

        val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
            BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

        pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
    } else {
        /* Older chips reset via PCI config space and must be polled. */
        val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
            BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
            BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
        REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

        /* Allow up to 30us for reset to complete. */
        for (i = 0; i < 10; i++) {
            val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
            if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
                break;
            }
            DELAY(10);
        }

        /* Check that reset completed successfully. */
        if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
            BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
            BCE_PRINTF("%s(%d): Reset failed!\n",
                __FILE__, __LINE__);
            rc = EBUSY;
            goto bce_reset_exit;
        }
    }

    /* Make sure byte swapping is properly configured. */
    val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
    if (val != 0x01020304) {
        BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
            __FILE__, __LINE__);
        rc = ENODEV;
        goto bce_reset_exit;
    }

    /* Just completed a reset, assume that firmware is running again.
*/
    sc->bce_fw_timed_out = FALSE;
    sc->bce_drv_cardiac_arrest = FALSE;

    /* Wait for the firmware to finish its initialization. */
    rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
    if (rc)
        BCE_PRINTF("%s(%d): Firmware did not complete "
            "initialization!\n", __FILE__, __LINE__);

bce_reset_exit:
    DBEXIT(BCE_VERBOSE_RESET);
    return (rc);
}


/* Perform post-reset chip initialization (DMA config, contexts, CPUs, NVRAM, MQ). */
static int
bce_chipinit(struct bce_softc *sc)
{
    u32 val;
    int rc = 0;

    DBENTER(BCE_VERBOSE_RESET);

    bce_disable_intr(sc);

    /*
     * Initialize DMA byte/word swapping, configure the number of DMA
     * channels and PCI clock compensation delay.
     */
    val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
        BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
        BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
        BCE_DMA_CONFIG_CNTL_WORD_SWAP |
        DMA_READ_CHANS << 12 |
        DMA_WRITE_CHANS << 16;

    val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

    if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
        val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

    /*
     * This setting resolves a problem observed on certain Intel PCI
     * chipsets that cannot handle multiple outstanding DMA operations.
     * See errata E9_5706A1_65.
     */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
        (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
        !(sc->bce_flags & BCE_PCIX_FLAG))
        val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

    REG_WR(sc, BCE_DMA_CONFIG, val);

    /* Enable the RX_V2P and Context state machines before access. */
    REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
        BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
        BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
        BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

    /* Initialize context mapping and zero out the quick contexts. */
    if ((rc = bce_init_ctx(sc)) != 0)
        goto bce_chipinit_exit;

    /* Initialize the on-boards CPUs */
    bce_init_cpus(sc);

    /* Enable management frames (NC-SI) to flow to the MCP. */
    if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
        val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
        REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
    }

    /* Prepare NVRAM for access. */
    if ((rc = bce_init_nvram(sc)) != 0)
        goto bce_chipinit_exit;

    /* Set the kernel bypass block size */
    val = REG_RD(sc, BCE_MQ_CONFIG);
    val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
    val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

    /* Enable bins used on the 5709. */
    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
        val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
        if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
            val |= BCE_MQ_CONFIG_HALT_DIS;
    }

    REG_WR(sc, BCE_MQ_CONFIG, val);

    /* Place the kernel bypass window just above the kernel context area. */
    val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
    REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
    REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

    /* Set the page size and clear the RV2P processor stall bits. */
    val = (BCM_PAGE_BITS - 8) << 24;
    REG_WR(sc, BCE_RV2P_CONFIG, val);

    /* Configure page size. */
    val = REG_RD(sc, BCE_TBDR_CONFIG);
    val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
    val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
    REG_WR(sc, BCE_TBDR_CONFIG, val);

    /* Set the perfect match control register to default. */
    REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

bce_chipinit_exit:
    DBEXIT(BCE_VERBOSE_RESET);

    return(rc);
}


/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 4831 /****************************************************************************/ 4832 static int 4833 bce_blockinit(struct bce_softc *sc) 4834 { 4835 u32 reg, val; 4836 int rc = 0; 4837 4838 DBENTER(BCE_VERBOSE_RESET); 4839 4840 /* Load the hardware default MAC address. */ 4841 bce_set_mac_addr(sc); 4842 4843 /* Set the Ethernet backoff seed value */ 4844 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + 4845 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) + 4846 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 4847 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 4848 4849 sc->last_status_idx = 0; 4850 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 4851 4852 /* Set up link change interrupt generation. */ 4853 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 4854 4855 /* Program the physical address of the status block. */ 4856 REG_WR(sc, BCE_HC_STATUS_ADDR_L, 4857 BCE_ADDR_LO(sc->status_block_paddr)); 4858 REG_WR(sc, BCE_HC_STATUS_ADDR_H, 4859 BCE_ADDR_HI(sc->status_block_paddr)); 4860 4861 /* Program the physical address of the statistics block. */ 4862 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 4863 BCE_ADDR_LO(sc->stats_block_paddr)); 4864 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 4865 BCE_ADDR_HI(sc->stats_block_paddr)); 4866 4867 /* Program various host coalescing parameters. 
*/ 4868 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 4869 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); 4870 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 4871 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); 4872 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 4873 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 4874 REG_WR(sc, BCE_HC_TX_TICKS, 4875 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 4876 REG_WR(sc, BCE_HC_RX_TICKS, 4877 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 4878 REG_WR(sc, BCE_HC_COM_TICKS, 4879 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 4880 REG_WR(sc, BCE_HC_CMD_TICKS, 4881 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 4882 REG_WR(sc, BCE_HC_STATS_TICKS, 4883 (sc->bce_stats_ticks & 0xffff00)); 4884 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 4885 4886 /* Configure the Host Coalescing block. */ 4887 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | 4888 BCE_HC_CONFIG_COLLECT_STATS; 4889 4890 #if 0 4891 /* ToDo: Add MSI-X support. */ 4892 if (sc->bce_flags & BCE_USING_MSIX_FLAG) { 4893 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) + 4894 BCE_HC_SB_CONFIG_1; 4895 4896 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); 4897 4898 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE | 4899 BCE_HC_SB_CONFIG_1_ONE_SHOT); 4900 4901 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 4902 (sc->tx_quick_cons_trip_int << 16) | 4903 sc->tx_quick_cons_trip); 4904 4905 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 4906 (sc->tx_ticks_int << 16) | sc->tx_ticks); 4907 4908 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 4909 } 4910 4911 /* 4912 * Tell the HC block to automatically set the 4913 * INT_MASK bit after an MSI/MSI-X interrupt 4914 * is generated so the driver doesn't have to. 4915 */ 4916 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG) 4917 val |= BCE_HC_CONFIG_ONE_SHOT; 4918 4919 /* Set the MSI-X status blocks to 128 byte boundaries. 
*/ 4920 if (sc->bce_flags & BCE_USING_MSIX_FLAG) 4921 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 4922 #endif 4923 4924 REG_WR(sc, BCE_HC_CONFIG, val); 4925 4926 /* Clear the internal statistics counters. */ 4927 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 4928 4929 /* Verify that bootcode is running. */ 4930 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 4931 4932 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control), 4933 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", 4934 __FILE__, __LINE__); 4935 reg = 0); 4936 4937 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 4938 BCE_DEV_INFO_SIGNATURE_MAGIC) { 4939 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, " 4940 "Expected: 08%08X\n", __FILE__, __LINE__, 4941 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), 4942 BCE_DEV_INFO_SIGNATURE_MAGIC); 4943 rc = ENODEV; 4944 goto bce_blockinit_exit; 4945 } 4946 4947 /* Enable DMA */ 4948 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4949 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4950 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 4951 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 4952 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 4953 } 4954 4955 /* Allow bootcode to apply additional fixes before enabling MAC. */ 4956 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | 4957 BCE_DRV_MSG_CODE_RESET); 4958 4959 /* Enable link state change interrupt generation. */ 4960 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 4961 4962 /* Enable the RXP. */ 4963 bce_start_rxp_cpu(sc); 4964 4965 /* Disable management frames (NC-SI) from flowing to the MCP. */ 4966 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 4967 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 4968 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 4969 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 4970 } 4971 4972 /* Enable all remaining blocks in the MAC. 
*/ 4973 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4974 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 4975 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4976 BCE_MISC_ENABLE_DEFAULT_XI); 4977 else 4978 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4979 BCE_MISC_ENABLE_DEFAULT); 4980 4981 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 4982 DELAY(20); 4983 4984 /* Save the current host coalescing block settings. */ 4985 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 4986 4987 bce_blockinit_exit: 4988 DBEXIT(BCE_VERBOSE_RESET); 4989 4990 return (rc); 4991 } 4992 4993 4994 /****************************************************************************/ 4995 /* Encapsulate an mbuf into the rx_bd chain. */ 4996 /* */ 4997 /* Returns: */ 4998 /* 0 for success, positive value for failure. */ 4999 /****************************************************************************/ 5000 static int 5001 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, 5002 u16 *chain_prod, u32 *prod_bseq) 5003 { 5004 bus_dmamap_t map; 5005 bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; 5006 struct mbuf *m_new = NULL; 5007 struct rx_bd *rxbd; 5008 int nsegs, error, rc = 0; 5009 #ifdef BCE_DEBUG 5010 u16 debug_chain_prod = *chain_prod; 5011 #endif 5012 5013 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5014 5015 /* Make sure the inputs are valid. */ 5016 DBRUNIF((*chain_prod > MAX_RX_BD), 5017 BCE_PRINTF("%s(%d): RX producer out of range: " 5018 "0x%04X > 0x%04X\n", __FILE__, __LINE__, 5019 *chain_prod, (u16) MAX_RX_BD)); 5020 5021 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " 5022 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, 5023 *prod, *chain_prod, *prod_bseq); 5024 5025 /* Update some debug statistic counters */ 5026 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 5027 sc->rx_low_watermark = sc->free_rx_bd); 5028 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), 5029 sc->rx_empty_count++); 5030 5031 /* Check whether this is a new mbuf allocation. 
*/
    if (m == NULL) {

        /* Simulate an mbuf allocation failure. */
        DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
            sc->mbuf_alloc_failed_count++;
            sc->mbuf_alloc_failed_sim_count++;
            rc = ENOBUFS;
            goto bce_get_rx_buf_exit);

        /* This is a new mbuf allocation. */
#ifdef BCE_JUMBO_HDRSPLIT
        MGETHDR(m_new, M_DONTWAIT, MT_DATA);
#else
        m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
            sc->rx_bd_mbuf_alloc_size);
#endif

        if (m_new == NULL) {
            sc->mbuf_alloc_failed_count++;
            rc = ENOBUFS;
            goto bce_get_rx_buf_exit;
        }

        DBRUN(sc->debug_rx_mbuf_alloc++);
    } else {
        /* Reuse an existing mbuf. */
        m_new = m;
    }

    /* Make sure we have a valid packet header. */
    M_ASSERTPKTHDR(m_new);

    /* Initialize the mbuf size and pad if necessary for alignment. */
    m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
    m_adj(m_new, sc->rx_bd_mbuf_align_pad);

    /* ToDo: Consider calling m_fragment() to test error handling. */

    /* Map the mbuf cluster into device memory. */
    map = sc->rx_mbuf_map[*chain_prod];
    error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
        segs, &nsegs, BUS_DMA_NOWAIT);

    /* Handle any mapping errors. */
    if (error) {
        BCE_PRINTF("%s(%d): Error mapping mbuf into RX "
            "chain (%d)!\n", __FILE__, __LINE__, error);

        sc->dma_map_addr_rx_failed_count++;
        m_freem(m_new);

        DBRUN(sc->debug_rx_mbuf_alloc--);

        rc = ENOBUFS;
        goto bce_get_rx_buf_exit;
    }

    /* All mbufs must map to a single segment. */
    KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
        __FUNCTION__, nsegs));

    /* Setup the rx_bd for the segment. */
    rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

    rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
    rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
    rxbd->rx_bd_len = htole32(segs[0].ds_len);
    rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
    *prod_bseq += segs[0].ds_len;

    /* Save the mbuf and update our counter. */
    sc->rx_mbuf_ptr[*chain_prod] = m_new;
    sc->free_rx_bd -= nsegs;

    DBRUNMSG(BCE_INSANE_RECV,
        bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));

    DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
        "chain_prod = 0x%04X, prod_bseq = 0x%08X\n",
        __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_rx_buf_exit:
    DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

    return(rc);
}


#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Encapsulate an mbuf cluster into the page chain.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
    u16 *prod_idx)
{
    bus_dmamap_t map;
    bus_addr_t busaddr;
    struct mbuf *m_new = NULL;
    struct rx_bd *pgbd;
    int error, rc = 0;
#ifdef BCE_DEBUG
    u16 debug_prod_idx = *prod_idx;
#endif

    DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

    /* Make sure the inputs are valid.
*/
	DBRUNIF((*prod_idx > MAX_PG_BD),
	    BCE_PRINTF("%s(%d): page producer out of range: "
	    "0x%04X > 0x%04X\n", __FILE__, __LINE__,
	    *prod_idx, (u16) MAX_PG_BD));

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
	    "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);

	/* Update counters if we've hit a new low or run out of pages. */
	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
	    sc->pg_low_watermark = sc->free_pg_bd);
	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);

	/* Check whether this is a new mbuf allocation. */
	if (m == NULL) {

		/* Simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
		    sc->mbuf_alloc_failed_count++;
		    sc->mbuf_alloc_failed_sim_count++;
		    rc = ENOBUFS;
		    goto bce_get_pg_buf_exit);

		/* This is a new mbuf allocation. */
		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
		if (m_new == NULL) {
			sc->mbuf_alloc_failed_count++;
			rc = ENOBUFS;
			goto bce_get_pg_buf_exit;
		}

		DBRUN(sc->debug_pg_mbuf_alloc++);
	} else {
		/* Reuse an existing mbuf, rewinding to the cluster start. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_new->m_len = sc->pg_bd_mbuf_alloc_size;

	/* ToDo: Consider calling m_fragment() to test error handling. */

	/* Map the mbuf cluster into device memory. */
	map = sc->pg_mbuf_map[*prod_idx];
	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr,
	    &busaddr, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
		    __FILE__, __LINE__);

		/* NOTE(review): frees a caller-supplied mbuf too. */
		m_freem(m_new);
		DBRUN(sc->debug_pg_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_pg_buf_exit;
	}

	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */

	/*
	 * The page chain uses the same rx_bd data structure
	 * as the receive chain but doesn't require a byte sequence (bseq).
	 */
	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];

	pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
	pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
	pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
	pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->pg_mbuf_ptr[*prod_idx] = m_new;
	sc->free_pg_bd--;

	DBRUNMSG(BCE_INSANE_RECV,
	    bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1));

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
	    "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);

bce_get_pg_buf_exit:
	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	return(rc);
}
#endif /* BCE_JUMBO_HDRSPLIT */


/****************************************************************************/
/* Initialize the TX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_tx_context(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);

	/* Initialize the context ID for an L2 TX chain. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI |
		    BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain.
*/
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD;
	DBRUN(sc->tx_hi_watermark = 0);
	DBRUN(sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	bce_init_tx_context(sc);

	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			if (sc->tx_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->tx_mbuf_tag,
				    sc->tx_mbuf_map[i],
				    BUS_DMASYNC_POSTWRITE);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page.
*/
	for (i = 0; i < TX_PAGES; i++)
		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);

	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_tx_mbuf_alloc),
	    BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
	    "from tx chain!\n", __FILE__, __LINE__,
	    sc->debug_tx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
}


/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);

	/* Init the type, size, and BD cache levels for the RX context. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
	    (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		u32 lo_water, hi_water;

		/* Use the default low watermark only with TX flow control. */
		if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) {
			lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		} else {
			lo_water = 0;
		}

		/* The low watermark can't exceed the usable BD count. */
		if (lo_water >= USABLE_RX_BD) {
			lo_water = 0;
		}

		hi_water = USABLE_RX_BD / 4;

		/* Disable the low watermark if it isn't below the high. */
		if (hi_water <= lo_water) {
			lo_water = 0;
		}

		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		/* Clamp the scaled high watermark to its 4-bit field. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;

		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Fill up the RX chain. */
	bce_fill_rx_chain(sc);

	/* Reset the debug watermark/empty statistics. */
	DBRUN(sc->rx_low_watermark = USABLE_RX_BD);
	DBRUN(sc->rx_empty_count = 0);

	/* Hand the rx_bd chain pages over to the hardware. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	bce_init_rx_context(sc);

	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* ToDo: Are there possible failure modes here? */

	return(rc);
}


/****************************************************************************/
/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
/* occurs.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_fill_rx_chain(struct bce_softc *sc)
{
	u16 prod, prod_idx;
	u32 prod_bseq;

	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Get the RX chain producer indices. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;

	/* Keep filling the RX chain until it's full. */
	while (sc->free_rx_bd > 0) {
		prod_idx = RX_CHAIN_IDX(prod);
		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer indices. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* We should never end up pointing to a next page pointer. */
	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
	    BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
	    __FUNCTION__, sc->rx_prod));

	/* Write the mailbox and tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
	    BCE_L2MQ_RX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) +
	    BCE_L2MQ_RX_HOST_BSEQ, sc->rx_prod_bseq);

	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);

	/* Free any mbufs still in the RX mbuf chain.
*/
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			if (sc->rx_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->rx_mbuf_tag,
				    sc->rx_mbuf_map[i],
				    BUS_DMASYNC_POSTREAD);
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < RX_PAGES; i++)
		if (sc->rx_bd_chain[i] != NULL) {
			bzero((char *)sc->rx_bd_chain[i],
			    BCE_RX_CHAIN_PAGE_SZ);
		}

	sc->free_rx_bd = sc->max_rx_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_rx_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
	    __FUNCTION__, sc->debug_rx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}


#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Allocate memory and initialize the page data structures.                 */
/* Assumes that bce_init_rx_chain() has not already been called.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_pg_chain(struct bce_softc *sc)
{
	struct rx_bd *pgbd;
	int i, rc = 0;
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Initialize the page producer and consumer indices. */
	sc->pg_prod = 0;
	sc->pg_cons = 0;
	sc->free_pg_bd = USABLE_PG_BD;
	sc->max_pg_bd = USABLE_PG_BD;
	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
	DBRUN(sc->pg_empty_count = 0);

	/* Initialize the page next pointer chain entries. */
	for (i = 0; i < PG_PAGES; i++) {
		int j;

		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (PG_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		pgbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
		pgbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
	}

	/* Setup the MQ BIN mapping for host_pg_bidx. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);

	/* Configure the rx_bd and page chain mbuf cluster size. */
	val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);

	/* Configure the context reserved for jumbo support. */
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
	    BCE_L2CTX_RX_RBDC_JUMBO_KEY);

	/* Point the hardware to the first page in the page chain. */
	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);

	/* Fill up the page chain. */
	bce_fill_pg_chain(sc);

	/* Hand the page chain BD pages over to the hardware. */
	for (i = 0; i < PG_PAGES; i++) {
		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
	return(rc);
}


/****************************************************************************/
/* Add mbufs to the page chain until it's full or an mbuf allocation error  */
/* occurs.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_fill_pg_chain(struct bce_softc *sc)
{
	u16 prod, prod_idx;

	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Get the page chain producer index. */
	prod = sc->pg_prod;

	/* Keep filling the page chain until it's full. */
	while (sc->free_pg_bd > 0) {
		prod_idx = PG_CHAIN_IDX(prod);
		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_PG_BD(prod);
	}

	/* Save the page chain producer index. */
	sc->pg_prod = prod;

	/*
	 * We should never end up pointing to a next page pointer.
	 * NOTE(review): this check uses USABLE_RX_BD_PER_PAGE; for the
	 * page chain USABLE_PG_BD_PER_PAGE looks intended -- confirm.
	 */
	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
	    BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
	    __FUNCTION__, sc->pg_prod));

	/*
	 * Write the mailbox and tell the chip about
	 * the new rx_bd's in the page chain.
	 */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) +
	    BCE_L2MQ_RX_HOST_PG_BDIDX, sc->pg_prod);

	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Free memory and clear the page chain data structures.
*/
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_pg_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);

	/* Free any mbufs still in the mbuf page chain. */
	for (i = 0; i < TOTAL_PG_BD; i++) {
		if (sc->pg_mbuf_ptr[i] != NULL) {
			if (sc->pg_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->pg_mbuf_tag,
				    sc->pg_mbuf_map[i],
				    BUS_DMASYNC_POSTREAD);
			m_freem(sc->pg_mbuf_ptr[i]);
			sc->pg_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_pg_mbuf_alloc--);
		}
	}

	/* Clear each page chain page. */
	for (i = 0; i < PG_PAGES; i++)
		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);

	sc->free_pg_bd = sc->max_pg_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_pg_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
	    __FUNCTION__, sc->debug_pg_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}
#endif /* BCE_JUMBO_HDRSPLIT */


/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int error;

	DBENTER(BCE_VERBOSE);

	BCE_LOCK(sc);
	error = bce_ifmedia_upd_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE);
	return (error);
}


/****************************************************************************/
/* Set media options (softc lock must be held by the caller).               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	int error;

	DBENTER(BCE_VERBOSE_PHY);

	error = 0;
	BCE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bce_miibus);

	/* Make sure the MII bus has been enumerated. */
	if (mii) {
		sc->bce_link_up = FALSE;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			/* Reset each PHY instance before the media change. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		error = mii_mediachg(mii);
	}

	DBEXIT(BCE_VERBOSE_PHY);
	return (error);
}


/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	DBENTER(BCE_VERBOSE_PHY);

	BCE_LOCK(sc);

	/* Report nothing if the interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		BCE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->bce_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_PHY);
}


/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);

	DBRUN(sc->phy_interrupts++);

	/* Compare the link attention bit against its acked copy. */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {

		/* Update the status_attn_bits_ack field. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
			    __FUNCTION__);
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
			    __FUNCTION__);
		}

		/*
		 * Assume link is down and allow
		 * tick routine to update the state
		 * based on the actual media state.
		 */
		sc->bce_link_up = FALSE;
		callout_stop(&sc->bce_tick_callout);
		bce_tick(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);

	DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over    */
/* chain page pointer if necessary.
*/
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static inline u16
bce_get_hw_rx_cons(struct bce_softc *sc)
{
	u16 hw_cons;

	/* Order this read after any preceding status block updates. */
	rmb();
	hw_cons = sc->status_block->status_rx_quick_consumer_index0;

	/* Skip the slot reserved for the next page pointer at a page end. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	return hw_cons;
}

/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = sc->bce_ifp;
	struct l2_fhdr *l2fhdr;
	struct ether_vlan_header *vh;
	unsigned int pkt_len;
	u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
	u32 status;
#ifdef BCE_JUMBO_HDRSPLIT
	unsigned int rem_len;
	u16 sw_pg_cons, sw_pg_cons_idx;
#endif

	DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_rx++);
	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);

#ifdef BCE_JUMBO_HDRSPLIT
	/* Prepare the page chain pages to be accessed by the host CPU. */
	for (int i = 0; i < PG_PAGES; i++)
		bus_dmamap_sync(sc->pg_bd_chain_tag,
		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
#endif

	/* Get the hardware's view of the RX consumer index.
*/ 5991 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 5992 5993 /* Get working copies of the driver's view of the consumer indices. */ 5994 sw_rx_cons = sc->rx_cons; 5995 5996 #ifdef BCE_JUMBO_HDRSPLIT 5997 sw_pg_cons = sc->pg_cons; 5998 #endif 5999 6000 /* Update some debug statistics counters */ 6001 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 6002 sc->rx_low_watermark = sc->free_rx_bd); 6003 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), 6004 sc->rx_empty_count++); 6005 6006 /* Scan through the receive chain as long as there is work to do */ 6007 /* ToDo: Consider setting a limit on the number of packets processed. */ 6008 rmb(); 6009 while (sw_rx_cons != hw_rx_cons) { 6010 struct mbuf *m0; 6011 6012 /* Convert the producer/consumer indices to an actual rx_bd index. */ 6013 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); 6014 6015 /* Unmap the mbuf from DMA space. */ 6016 bus_dmamap_sync(sc->rx_mbuf_tag, 6017 sc->rx_mbuf_map[sw_rx_cons_idx], 6018 BUS_DMASYNC_POSTREAD); 6019 bus_dmamap_unload(sc->rx_mbuf_tag, 6020 sc->rx_mbuf_map[sw_rx_cons_idx]); 6021 6022 /* Remove the mbuf from the RX chain. */ 6023 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx]; 6024 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL; 6025 DBRUN(sc->debug_rx_mbuf_alloc--); 6026 sc->free_rx_bd++; 6027 6028 if(m0 == NULL) { 6029 DBPRINT(sc, BCE_EXTREME_RECV, 6030 "%s(): Oops! Empty mbuf pointer " 6031 "found in sc->rx_mbuf_ptr[0x%04X]!\n", 6032 __FUNCTION__, sw_rx_cons_idx); 6033 goto bce_rx_int_next_rx; 6034 } 6035 6036 /* 6037 * Frames received on the NetXteme II are prepended 6038 * with an l2_fhdr structure which provides status 6039 * information about the received frame (including 6040 * VLAN tags and checksum info). The frames are 6041 * also automatically adjusted to align the IP 6042 * header (i.e. two null bytes are inserted before 6043 * the Ethernet header). 
As a result the data 6044 * DMA'd by the controller into the mbuf looks 6045 * like this: 6046 * 6047 * +---------+-----+---------------------+-----+ 6048 * | l2_fhdr | pad | packet data | FCS | 6049 * +---------+-----+---------------------+-----+ 6050 * 6051 * The l2_fhdr needs to be checked and skipped and 6052 * the FCS needs to be stripped before sending the 6053 * packet up the stack. 6054 */ 6055 l2fhdr = mtod(m0, struct l2_fhdr *); 6056 6057 /* Get the packet data + FCS length and the status. */ 6058 pkt_len = l2fhdr->l2_fhdr_pkt_len; 6059 status = l2fhdr->l2_fhdr_status; 6060 6061 /* 6062 * Skip over the l2_fhdr and pad, resulting in the 6063 * following data in the mbuf: 6064 * +---------------------+-----+ 6065 * | packet data | FCS | 6066 * +---------------------+-----+ 6067 */ 6068 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN); 6069 6070 #ifdef BCE_JUMBO_HDRSPLIT 6071 /* 6072 * Check whether the received frame fits in a single 6073 * mbuf or not (i.e. packet data + FCS <= 6074 * sc->rx_bd_mbuf_data_len bytes). 6075 */ 6076 if (pkt_len > m0->m_len) { 6077 /* 6078 * The received frame is larger than a single mbuf. 6079 * If the frame was a TCP frame then only the TCP 6080 * header is placed in the mbuf, the remaining 6081 * payload (including FCS) is placed in the page 6082 * chain, the SPLIT flag is set, and the header 6083 * length is placed in the IP checksum field. 6084 * If the frame is not a TCP frame then the mbuf 6085 * is filled and the remaining bytes are placed 6086 * in the page chain. 6087 */ 6088 6089 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large " 6090 "packet.\n", __FUNCTION__); 6091 6092 /* 6093 * When the page chain is enabled and the TCP 6094 * header has been split from the TCP payload, 6095 * the ip_xsum structure will reflect the length 6096 * of the TCP header, not the IP checksum. Set 6097 * the packet length of the mbuf accordingly. 
6098 */ 6099 if (status & L2_FHDR_STATUS_SPLIT) 6100 m0->m_len = l2fhdr->l2_fhdr_ip_xsum; 6101 6102 rem_len = pkt_len - m0->m_len; 6103 6104 /* Pull mbufs off the page chain for the remaining data. */ 6105 while (rem_len > 0) { 6106 struct mbuf *m_pg; 6107 6108 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons); 6109 6110 /* Remove the mbuf from the page chain. */ 6111 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx]; 6112 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL; 6113 DBRUN(sc->debug_pg_mbuf_alloc--); 6114 sc->free_pg_bd++; 6115 6116 /* Unmap the page chain mbuf from DMA space. */ 6117 bus_dmamap_sync(sc->pg_mbuf_tag, 6118 sc->pg_mbuf_map[sw_pg_cons_idx], 6119 BUS_DMASYNC_POSTREAD); 6120 bus_dmamap_unload(sc->pg_mbuf_tag, 6121 sc->pg_mbuf_map[sw_pg_cons_idx]); 6122 6123 /* Adjust the mbuf length. */ 6124 if (rem_len < m_pg->m_len) { 6125 /* The mbuf chain is complete. */ 6126 m_pg->m_len = rem_len; 6127 rem_len = 0; 6128 } else { 6129 /* More packet data is waiting. */ 6130 rem_len -= m_pg->m_len; 6131 } 6132 6133 /* Concatenate the mbuf cluster to the mbuf. */ 6134 m_cat(m0, m_pg); 6135 6136 sw_pg_cons = NEXT_PG_BD(sw_pg_cons); 6137 } 6138 6139 /* Set the total packet length. */ 6140 m0->m_pkthdr.len = pkt_len; 6141 6142 } else { 6143 /* 6144 * The received packet is small and fits in a 6145 * single mbuf (i.e. the l2_fhdr + pad + packet + 6146 * FCS <= MHLEN). In other words, the packet is 6147 * 154 bytes or less in size. 6148 */ 6149 6150 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small " 6151 "packet.\n", __FUNCTION__); 6152 6153 /* Set the total packet length. */ 6154 m0->m_pkthdr.len = m0->m_len = pkt_len; 6155 } 6156 #else 6157 /* Set the total packet length. */ 6158 m0->m_pkthdr.len = m0->m_len = pkt_len; 6159 #endif 6160 6161 /* Remove the trailing Ethernet FCS. */ 6162 m_adj(m0, -ETHER_CRC_LEN); 6163 6164 /* Check that the resulting mbuf chain is valid. 
*/ 6165 DBRUN(m_sanity(m0, FALSE)); 6166 DBRUNIF(((m0->m_len < ETHER_HDR_LEN) | 6167 (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)), 6168 BCE_PRINTF("Invalid Ethernet frame size!\n"); 6169 m_print(m0, 128)); 6170 6171 DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control), 6172 sc->l2fhdr_error_sim_count++; 6173 status = status | L2_FHDR_ERRORS_PHY_DECODE); 6174 6175 /* Check the received frame for errors. */ 6176 if (status & (L2_FHDR_ERRORS_BAD_CRC | 6177 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 6178 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { 6179 6180 /* Log the error and release the mbuf. */ 6181 ifp->if_ierrors++; 6182 sc->l2fhdr_error_count++; 6183 6184 m_freem(m0); 6185 m0 = NULL; 6186 goto bce_rx_int_next_rx; 6187 } 6188 6189 /* Send the packet to the appropriate interface. */ 6190 m0->m_pkthdr.rcvif = ifp; 6191 6192 /* Assume no hardware checksum. */ 6193 m0->m_pkthdr.csum_flags = 0; 6194 6195 /* Validate the checksum if offload enabled. */ 6196 if (ifp->if_capenable & IFCAP_RXCSUM) { 6197 6198 /* Check for an IP datagram. */ 6199 if (!(status & L2_FHDR_STATUS_SPLIT) && 6200 (status & L2_FHDR_STATUS_IP_DATAGRAM)) { 6201 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 6202 DBRUN(sc->csum_offload_ip++); 6203 /* Check if the IP checksum is valid. */ 6204 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) 6205 m0->m_pkthdr.csum_flags |= 6206 CSUM_IP_VALID; 6207 } 6208 6209 /* Check for a valid TCP/UDP frame. */ 6210 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 6211 L2_FHDR_STATUS_UDP_DATAGRAM)) { 6212 6213 /* Check for a good TCP/UDP checksum. */ 6214 if ((status & (L2_FHDR_ERRORS_TCP_XSUM | 6215 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 6216 DBRUN(sc->csum_offload_tcp_udp++); 6217 m0->m_pkthdr.csum_data = 6218 l2fhdr->l2_fhdr_tcp_udp_xsum; 6219 m0->m_pkthdr.csum_flags |= 6220 (CSUM_DATA_VALID 6221 | CSUM_PSEUDO_HDR); 6222 } 6223 } 6224 } 6225 6226 /* Attach the VLAN tag. 
*/ 6227 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 6228 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 6229 #if __FreeBSD_version < 700000 6230 VLAN_INPUT_TAG(ifp, m0, 6231 l2fhdr->l2_fhdr_vlan_tag, continue); 6232 #else 6233 m0->m_pkthdr.ether_vtag = 6234 l2fhdr->l2_fhdr_vlan_tag; 6235 m0->m_flags |= M_VLANTAG; 6236 #endif 6237 } else { 6238 /* 6239 * bce(4) controllers can't disable VLAN 6240 * tag stripping if management firmware 6241 * (ASF/IPMI/UMP) is running. So we always 6242 * strip VLAN tag and manually reconstruct 6243 * the VLAN frame by appending stripped 6244 * VLAN tag in driver if VLAN tag stripping 6245 * was disabled. 6246 * 6247 * TODO: LLC SNAP handling. 6248 */ 6249 bcopy(mtod(m0, uint8_t *), 6250 mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN, 6251 ETHER_ADDR_LEN * 2); 6252 m0->m_data -= ETHER_VLAN_ENCAP_LEN; 6253 vh = mtod(m0, struct ether_vlan_header *); 6254 vh->evl_encap_proto = htons(ETHERTYPE_VLAN); 6255 vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag); 6256 m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN; 6257 m0->m_len += ETHER_VLAN_ENCAP_LEN; 6258 } 6259 } 6260 6261 /* Increment received packet statistics. */ 6262 ifp->if_ipackets++; 6263 6264 bce_rx_int_next_rx: 6265 sw_rx_cons = NEXT_RX_BD(sw_rx_cons); 6266 6267 /* If we have a packet, pass it up the stack */ 6268 if (m0) { 6269 /* Make sure we don't lose our place when we release the lock. */ 6270 sc->rx_cons = sw_rx_cons; 6271 #ifdef BCE_JUMBO_HDRSPLIT 6272 sc->pg_cons = sw_pg_cons; 6273 #endif 6274 6275 BCE_UNLOCK(sc); 6276 (*ifp->if_input)(ifp, m0); 6277 BCE_LOCK(sc); 6278 6279 /* Recover our place. */ 6280 sw_rx_cons = sc->rx_cons; 6281 #ifdef BCE_JUMBO_HDRSPLIT 6282 sw_pg_cons = sc->pg_cons; 6283 #endif 6284 } 6285 6286 /* Refresh hw_cons to see if there's new work */ 6287 if (sw_rx_cons == hw_rx_cons) 6288 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 6289 } 6290 6291 #ifdef BCE_JUMBO_HDRSPLIT 6292 /* No new packets. Refill the page chain. 
 */
	sc->pg_cons = sw_pg_cons;
	bce_fill_pg_chain(sc);
#endif

	/* No new packets.  Refill the RX chain. */
	sc->rx_cons = sw_rx_cons;
	bce_fill_rx_chain(sc);

	/* Prepare the page chain pages to be accessed by the NIC. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

#ifdef BCE_JUMBO_HDRSPLIT
	for (int i = 0; i < PG_PAGES; i++)
		bus_dmamap_sync(sc->pg_bd_chain_tag,
		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
#endif

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
	DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static inline u16
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	u16 hw_cons;

	/* Memory barrier before reading the shared status block. */
	mb();
	hw_cons = sc->status_block->status_tx_quick_consumer_index0;
	/*
	 * The last usable BD index on a page is followed by the chain
	 * page pointer entry; bump past it so the index stays usable.
	 */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;

	return hw_cons;
}


/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_tx++);
	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);

	BCE_LOCK_ASSERT(sc);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
		    "sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    BCE_PRINTF("%s(%d): TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
		    (int) MAX_TX_BD);
		    bce_breakpoint(sc));

		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
		    [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
		    __FILE__, __LINE__, sw_tx_chain_cons);
		    bce_breakpoint(sc));

		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
		    bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf.  Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
			    BCE_PRINTF("%s(%d): tx_bd END flag not set but "
			    "txmbuf == NULL!\n", __FILE__, __LINE__);
			    bce_breakpoint(sc));

			DBRUNMSG(BCE_INFO_SEND,
			    BCE_PRINTF("%s(): Unloading map/freeing mbuf "
			    "from tx_bd[0x%04X]\n", __FUNCTION__,
			    sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);

		/* Prevent speculative reads of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	sc->watchdog_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
		    DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): Open TX chain! %d/%d (used/total)\n",
		    __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	DBENTER(BCE_VERBOSE_INTR);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the preceding write to the device. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	DBEXIT(BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc, int coal_now)
{
	DBENTER(BCE_VERBOSE_INTR);

	/*
	 * Two-step ack: first ack the current status block index with
	 * interrupts still masked, then unmask with the same index.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Force an immediate interrupt (whether there is new data or not). */
	if (coal_now)
		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	DBEXIT(BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu = 0;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF("%s(%d): Controller reset failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF("%s(%d): Block initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/*
	 * Calculate and program the hardware Ethernet MTU
	 * size. Be generous on the receive if we have room.
	 * Note: the "else" below pairs with whichever "if"
	 * survives preprocessing.
	 */
#ifdef BCE_JUMBO_HDRSPLIT
	if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
	    sc->pg_bd_mbuf_alloc_size))
		ether_mtu = sc->rx_bd_mbuf_data_len +
		    sc->pg_bd_mbuf_alloc_size;
#else
	if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
		ether_mtu = sc->rx_bd_mbuf_data_len;
#endif
	else
		ether_mtu = ifp->if_mtu;

	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n",
	    __FUNCTION__, ether_mtu);

	/* Program the mtu, enabling jumbo frame support if necessary. */
	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
	else
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);

	DBPRINT(sc, BCE_INFO_LOAD,
	    "%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, "
	    "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
	    sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
	    sc->rx_bd_mbuf_align_pad);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

#ifdef BCE_JUMBO_HDRSPLIT
	DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
	    __FUNCTION__, sc->pg_bd_mbuf_alloc_size);

	/* Init page buffer descriptor chain. */
	bce_init_pg_chain(sc);
#endif

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

	/* Enable host interrupts. */
	bce_enable_intr(sc, 1);

	bce_ifmedia_upd_locked(ifp);

	/* Let the OS know the driver is up and running. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

bce_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
		    "No management firmware running...\n");
		goto bce_mgmt_init_locked_exit;
	}

	ifp = sc->bce_ifp;

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	/* Read back to flush the write, then let the blocks settle. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd_locked(ifp);

bce_mgmt_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Handles controller initialization when called from an unlocked routine.  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Modifies an mbuf for TSO on the hardware.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Pointer to a modified mbuf, or NULL on failure (in which case          */
/*   *m_head has been freed and set to NULL).                               */
/****************************************************************************/
static struct mbuf *
bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
{
	struct mbuf *m;
	struct ether_header *eh;
	struct ip *ip;
	struct tcphdr *th;
	u16 etype;
	int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;

	DBRUN(sc->tso_frames_requested++);

	/* Controller may modify mbuf chains. */
	if (M_WRITABLE(*m_head) == 0) {
		m = m_dup(*m_head, M_DONTWAIT);
		m_freem(*m_head);
		if (m == NULL) {
			sc->mbuf_alloc_failed_count++;
			*m_head = NULL;
			return (NULL);
		}
		*m_head = m;
	}

	/*
	 * For TSO the controller needs two pieces of info,
	 * the MSS and the IP+TCP options length.
	 *
	 * NOTE(review): m_pullup() may replace the head mbuf on
	 * success, but *m_head is not refreshed here before the
	 * final "return (*m_head)" — confirm against mbuf(9) /
	 * later FreeBSD revisions of this driver.
	 */
	m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL) {
		*m_head = NULL;
		return (NULL);
	}
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Check for supported TSO Ethernet types (only IPv4 for now) */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		/* TSO only supported for TCP protocol. */
		if (ip->ip_p != IPPROTO_TCP) {
			BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
			    __FILE__, __LINE__);
			m_freem(*m_head);
			*m_head = NULL;
			return (NULL);
		}

		/* Get IP header length in bytes (min 20) */
		ip_hlen = ip->ip_hl << 2;
		m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen +
		    sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (NULL);
		}

		/* Get the TCP header length in bytes (min 20) */
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		tcp_hlen = (th->th_off << 2);

		/* Make sure all IP/TCP options live in the same buffer. */
		m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen +
		    tcp_hlen);
		if (m == NULL) {
			*m_head = NULL;
			return (NULL);
		}

		/* IP header length and checksum will be calc'd by hardware */
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		ip_len = ip->ip_len;
		ip->ip_len = 0;
		ip->ip_sum = 0;
		break;
	case ETHERTYPE_IPV6:
		BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
		    __FILE__, __LINE__);
		m_freem(*m_head);
		*m_head = NULL;
		return (NULL);
		/* NOT REACHED */
	default:
		BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
		    __FILE__, __LINE__);
		m_freem(*m_head);
		*m_head = NULL;
		return (NULL);
	}

	hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, "
	    "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
	    __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen,
	    tcp_hlen, ip_len);

	/* Set the LSO flag in the TX BD */
	*flags |= TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	*flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) -
	    sizeof(struct tcphdr)) >> 2) << 8);

	DBRUN(sc->tso_frames_completed++);
	return (*m_head);
}


/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/* Modified:                                                                */
/*   m_head: May be set to NULL if MBUF is excessively fragmented.
 */
/****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0;
	u32 prod_bseq;

#ifdef BCE_DEBUG
	u16 debug_prod;
#endif

	int i, error, nsegs, rc = 0;

	DBENTER(BCE_VERBOSE_SEND);

	/* Make sure we have room in the TX chain. */
	if (sc->used_tx_bd >= sc->max_tx_bd)
		goto bce_tx_encap_exit;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
			m0 = bce_tso_setup(sc, m_head, &flags);
			if (m0 == NULL) {
				DBRUN(sc->tso_frames_failed++);
				goto bce_tx_encap_exit;
			}
			mss = htole16(m0->m_pkthdr.tso_segsz);
		} else {
			if (m0->m_pkthdr.csum_flags & CSUM_IP)
				flags |= TX_BD_FLAGS_IP_CKSUM;
			if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
				flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
		}
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/* Check if the DMA mapping was successful */
	if (error == EFBIG) {
		sc->mbuf_frag_count++;

		/* Try to defrag the mbuf. */
		m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS);
		if (m0 == NULL) {
			/* Defrag was unsuccessful */
			m_freem(*m_head);
			*m_head = NULL;
			sc->mbuf_alloc_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}

		/* Defrag was successful, try mapping again */
		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag,
		    map, m0, segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* Insufficient DMA buffers available. */
			sc->dma_map_addr_tx_failed_count++;
			rc = error;
			goto bce_tx_encap_exit;
		} else if (error != 0) {
			/* Release it and return an error. */
			BCE_PRINTF("%s(%d): Unknown error mapping mbuf into "
			    "TX chain!\n", __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			sc->dma_map_addr_tx_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}
	} else if (error == ENOMEM) {
		/* Insufficient DMA buffers available. */
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	}

	/* Make sure there's room in the chain */
	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		rc = ENOBUFS;
		goto bce_tx_encap_exit;
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n",
	    __FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)]
		    [TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mixing htole32(mss << 16) with
		 * htole16(ds_len) only produces the intended field
		 * layout on little-endian hosts; on big-endian this
		 * looks wrong — confirm against byteorder(9) and
		 * later revisions of this driver.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUNMSG(BCE_EXTREME_SEND,
	    bce_dump_tx_chain(sc, debug_prod, nsegs));

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain. This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(sc->debug_tx_mbuf_alloc++);

	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting TX frames. */
	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);

bce_tx_encap_exit:
	DBEXIT(BCE_VERBOSE_SEND);
	return(rc);
}


/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);

	BCE_LOCK_ASSERT(sc);

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
	    "tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (sc->bce_link_up == FALSE) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		/* Stop when the transmit queue is empty. */
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
			    "TX chain is closed for business! Total "
			    "tx_bd used = %d\n", sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/* Exit if no packets were dequeued. */
	if (count == 0) {
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were "
		    "dequeued\n", __FUNCTION__);
		goto bce_start_locked_exit;
	}

	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into "
	    "send queue.\n", __FUNCTION__, count);

	/* Set the tx timeout. */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));

bce_start_locked_exit:
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
	return;
}


/****************************************************************************/
/* Main transmit routine when called from another routine without a lock.   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	DBENTER(BCE_VERBOSE_SEND);

	BCE_LOCK(sc);
	bce_start_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_SEND);
}


/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int mask, error = 0, reinit;

	DBENTER(BCE_VERBOSE_MISC);

	switch(command) {

	/* Set the interface MTU. */
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
		    (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO_MISC,
		    "SIOCSIFMTU: Changing MTU from %d to %d\n",
		    (int) ifp->if_mtu, (int) ifr->ifr_mtu);

		BCE_LOCK(sc);
		ifp->if_mtu = ifr->ifr_mtu;
		reinit = 0;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/*
			 * Because allocation size is used in RX
			 * buffer allocation, stop controller if
			 * it is already running.
			 */
			bce_stop(sc);
			reinit = 1;
		}
#ifdef BCE_JUMBO_HDRSPLIT
		/* No buffer allocation size changes are necessary. */
#else
		/* Recalculate our buffer allocation sizes. */
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
		    ETHER_CRC_LEN) > MCLBYTES) {
			sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
			sc->rx_bd_mbuf_align_pad =
			    roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
			sc->rx_bd_mbuf_data_len =
			    sc->rx_bd_mbuf_alloc_size -
			    sc->rx_bd_mbuf_align_pad;
		} else {
			sc->rx_bd_mbuf_alloc_size = MCLBYTES;
			sc->rx_bd_mbuf_align_pad =
			    roundup2(MCLBYTES, 16) - MCLBYTES;
			sc->rx_bd_mbuf_data_len =
			    sc->rx_bd_mbuf_alloc_size -
			    sc->rx_bd_mbuf_align_pad;
		}
#endif

		if (reinit != 0)
			bce_init_locked(sc);
		BCE_UNLOCK(sc);
		break;

	/* Set interface flags. */
	case SIOCSIFFLAGS:
		DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");

		BCE_LOCK(sc);

		/* Check if the interface is up. */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Change promiscuous/multicast flags as necessary. */
				bce_set_rx_mode(sc);
			} else {
				/* Start the HW */
				bce_init_locked(sc);
			}
		} else {
			/* The interface is down, check if driver is running. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bce_stop(sc);

				/* If MFW is running, restart the controller a bit. */
				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
					bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
					bce_chipinit(sc);
					bce_mgmt_init_locked(sc);
				}
			}
		}

		BCE_UNLOCK(sc);
		break;

	/* Add/Delete multicast address */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCADDMULTI/SIOCDELMULTI\n");

		BCE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bce_set_rx_mode(sc);
		BCE_UNLOCK(sc);

		break;

	/* Set/Get Interface media */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");

		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr,
		    &mii->mii_media, command);
		break;

	/* Set interface capability */
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		DBPRINT(sc, BCE_INFO_MISC,
		    "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);

		/* Toggle the TX checksum capabilities enable flag. */
		if (mask & IFCAP_TXCSUM &&
		    ifp->if_capabilities & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= BCE_IF_HWASSIST;
			else
				ifp->if_hwassist &= ~BCE_IF_HWASSIST;
		}

		/* Toggle the RX checksum capabilities enable flag. */
		if (mask & IFCAP_RXCSUM &&
		    ifp->if_capabilities & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		/* Toggle the TSO capabilities enable flag. */
		if (bce_tso_enable && (mask & IFCAP_TSO4) &&
		    ifp->if_capabilities & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (IFCAP_TSO4 & ifp->if_capenable)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_VLAN_HWCSUM &&
		    ifp->if_capabilities & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/*
		 * Don't actually disable VLAN tag stripping as
		 * management firmware (ASF/IPMI/UMP) requires the
		 * feature. If VLAN tag stripping is disabled driver
		 * will manually reconstruct the VLAN frame by
		 * appending stripped VLAN tag.
		 */
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			/* HWTSO requires hardware VLAN tagging. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			    == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		/* We don't know how to handle the IOCTL, pass it on. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DBEXIT(BCE_VERBOSE_MISC);
	return(error);
}


/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct bce_softc *sc)
{
	DBENTER(BCE_EXTREME_SEND);

	BCE_LOCK_ASSERT(sc);

	/* If the watchdog timer hasn't expired then just exit. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		goto bce_watchdog_exit;

	/* If pause frames are active then don't reset the hardware. */
	/* ToDo: Should we reset the timer here?
*/ 7319 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 7320 goto bce_watchdog_exit; 7321 7322 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n", 7323 __FILE__, __LINE__); 7324 7325 DBRUNMSG(BCE_INFO, 7326 bce_dump_driver_state(sc); 7327 bce_dump_status_block(sc); 7328 bce_dump_stats_block(sc); 7329 bce_dump_ftqs(sc); 7330 bce_dump_txp_state(sc, 0); 7331 bce_dump_rxp_state(sc, 0); 7332 bce_dump_tpat_state(sc, 0); 7333 bce_dump_cp_state(sc, 0); 7334 bce_dump_com_state(sc, 0)); 7335 7336 DBRUN(bce_breakpoint(sc)); 7337 7338 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 7339 7340 bce_init_locked(sc); 7341 sc->bce_ifp->if_oerrors++; 7342 7343 bce_watchdog_exit: 7344 DBEXIT(BCE_EXTREME_SEND); 7345 } 7346 7347 7348 /* 7349 * Interrupt handler. 7350 */ 7351 /****************************************************************************/ 7352 /* Main interrupt entry point. Verifies that the controller generated the */ 7353 /* interrupt and then calls a separate routine for handle the various */ 7354 /* interrupt causes (PHY, TX, RX). */ 7355 /* */ 7356 /* Returns: */ 7357 /* 0 for success, positive value for failure. 
 */
/****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;
	u16 hw_rx_cons, hw_tx_cons;

	sc = xsc;
	ifp = sc->bce_ifp;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc));

	BCE_LOCK(sc);

	DBRUN(sc->interrupts_generated++);

	/* Synchronize before we read from interface's status block */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	    BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
		DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
		    __FUNCTION__);
		goto bce_intr_exit;
	}

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		/* Debug build: randomly inject a fatal attention bit. */
		DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
		    BCE_PRINTF("Simulating unexpected status attention "
		    "bit set.");
		    sc->unexpected_attention_sim_count++;
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE)) {
			bce_phy_intr(sc);

			/* Clear transient updates during link state change. */
			REG_WR(sc, BCE_HC_COMMAND, sc->hc_command |
			    BCE_HC_COMMAND_COAL_NOW_WO_INT);
			/* Read back to flush the posted write. */
			REG_RD(sc, BCE_HC_COMMAND);
		}

		/* If any other attention is asserted, the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {

			sc->unexpected_attention_count++;

			BCE_PRINTF("%s(%d): Fatal attention detected: "
			    "0x%08X\n", __FILE__, __LINE__,
			    sc->status_block->status_attn_bits);

			DBRUNMSG(BCE_FATAL,
			    if (unexpected_attention_sim_control == 0)
				bce_breakpoint(sc));

			/* Reinitialize the controller to recover. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (hw_rx_cons != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (hw_tx_cons != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save status block index value for the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/*
		 * If there's no work left then exit the
		 * interrupt service routine.
		 */
		hw_rx_cons = bce_get_hw_rx_cons(sc);
		hw_tx_cons = bce_get_hw_tx_cons(sc);

		if ((hw_rx_cons == sc->hw_rx_cons) &&
		    (hw_tx_cons == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_PREREAD);

	/* Re-enable interrupts. */
	bce_enable_intr(sc, 0);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	DBENTER(BCE_VERBOSE_MISC);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
	    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses.
 */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			/* All-ones hash registers accept every multicast frame. */
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* CRC32 of the MAC selects one bit of the 256-bit hash. */
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}
		if_maddr_runlock(ifp);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: "
		    "0x%08X\n", rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);

	DBEXIT(BCE_VERBOSE_MISC);
}


/****************************************************************************/
/* Called periodically to updates statistics from the controllers           */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBENTER(BCE_EXTREME_MISC);

	ifp = sc->bce_ifp;

	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors +=
		    (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  64-bit counters are split into
	 * hi/lo 32-bit halves in the hardware statistics block.
	 */
	sc->stat_IfHCInOctets =
	    ((u64) stats->stat_IfHCInOctets_hi << 32) +
	    (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u64) stats->stat_IfHCOutOctets_hi << 32) +
	    (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* ToDo: Preserve counters beyond 32 bits? */
	/* ToDo: Read the statistics from auto-clear regs? */

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
	    stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
	    stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
	    stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
	    stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
	    stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
	    stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOversizePkts =
	    stats->stat_EtherStatsOversizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
	    stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
	    stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
	    stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
	    stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
	    stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
	    stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
	    stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
	    stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
	    stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
	    stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
	    stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
	    stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	/* Firmware-maintained "no buffers" counter (COM processor). */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions =
	    (u_long) sc->stat_EtherStatsCollisions;

	/* ToDo: This method loses soft errors. */
	ifp->if_ierrors =
	    (u_long) sc->stat_EtherStatsUndersizePkts +
	    (u_long) sc->stat_EtherStatsOversizePkts +
	    (u_long) sc->stat_IfInMBUFDiscards +
	    (u_long) sc->stat_Dot3StatsAlignmentErrors +
	    (u_long) sc->stat_Dot3StatsFCSErrors +
	    (u_long) sc->stat_IfInRuleCheckerDiscards +
	    (u_long) sc->stat_IfInFTQDiscards +
	    (u_long) sc->com_no_buffers;

	/* ToDo: This method loses soft errors. */
	ifp->if_oerrors =
	    (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long) sc->stat_Dot3StatsExcessiveCollisions +
	    (u_long) sc->stat_Dot3StatsLateCollisions;

	/* ToDo: Add additional statistics? */

	DBEXIT(BCE_EXTREME_MISC);
}


/****************************************************************************/
/* Periodic function to notify the bootcode that the driver is still        */
/* present.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_pulse(void *xsc)
{
	struct bce_softc *sc = xsc;
	u32 msg;

	DBENTER(BCE_EXTREME_MISC);

	BCE_LOCK_ASSERT(sc);

	/* Tell the firmware that the driver is still running. */
	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);

	/* Update the bootcode condition. */
	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);

	/* Report whether the bootcode still knows the driver is running. */
	if (bootverbose) {
		if (sc->bce_drv_cardiac_arrest == FALSE) {
			if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
				sc->bce_drv_cardiac_arrest = TRUE;
				BCE_PRINTF("%s(): Warning: bootcode "
				    "thinks driver is absent! "
				    "(bc_state = 0x%08X)\n",
				    __FUNCTION__, sc->bc_state);
			}
		} else {
			/*
			 * Not supported by all bootcode versions.
			 * (v5.0.11+ and v5.2.1+) Older bootcode
			 * will require the driver to reset the
			 * controller to clear this condition.
			 */
			if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
				sc->bce_drv_cardiac_arrest = FALSE;
				BCE_PRINTF("%s(): Bootcode found the "
				    "driver pulse! (bc_state = 0x%08X)\n",
				    __FUNCTION__, sc->bc_state);
			}
		}
	}


	/* Schedule the next pulse. */
	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);

	DBEXIT(BCE_EXTREME_MISC);
}


/****************************************************************************/
/* Periodic function to perform maintenance tasks.                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;

	ifp = sc->bce_ifp;

	DBENTER(BCE_EXTREME_MISC);

	BCE_LOCK_ASSERT(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

	/* Update the statistics from the hardware statistics block. */
	bce_stats_update(sc);

	/* Top off the receive and page chains. */
#ifdef BCE_JUMBO_HDRSPLIT
	bce_fill_pg_chain(sc);
#endif
	bce_fill_rx_chain(sc);

	/* Check that chip hasn't hung. */
	bce_watchdog(sc);

	/* If link is already up then we're done. */
	if (sc->bce_link_up == TRUE)
		goto bce_tick_exit;

	/* Link is down. Check what the PHY's doing. */
	mii = device_get_softc(sc->bce_miibus);
	mii_tick(mii);

	/* Check if the link has come up. */
	if ((mii->mii_media_status & IFM_ACTIVE) &&
	    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "%s(): Link up!\n", __FUNCTION__);
		sc->bce_link_up = TRUE;
		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
		    bootverbose)
			BCE_PRINTF("Gigabit link up!\n");

		/* Now that link is up, handle any outstanding TX traffic. */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found "
			    "pending TX traffic.\n", __FUNCTION__);
			bce_start_locked(ifp);
		}
	}

bce_tick_exit:
	DBEXIT(BCE_EXTREME_MISC);
	return;
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Allows the driver state to be dumped through the sysctl interface.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	/* Writing "1" to this node triggers the dump; reads return -1. */
	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_driver_state(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the hardware state to be dumped through the sysctl interface.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_hw_state(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the status block to be dumped through the sysctl interface.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_status_block(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_status_block(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the stats block to be dumped through the sysctl interface.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_stats_block(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the stat counters to be cleared without unloading/reloading the   */
/* driver.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;

		/* Clear the internal H/W statistics counters. */
		REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);

		/* Reset the driver maintained statistics. */
		sc->interrupts_rx =
		    sc->interrupts_tx = 0;
		sc->tso_frames_requested =
		    sc->tso_frames_completed =
		    sc->tso_frames_failed = 0;
		sc->rx_empty_count =
		    sc->tx_full_count = 0;
		/* Watermarks reset to their extreme values. */
		sc->rx_low_watermark = USABLE_RX_BD;
		sc->tx_hi_watermark = 0;
		sc->l2fhdr_error_count =
		    sc->l2fhdr_error_sim_count = 0;
		sc->mbuf_alloc_failed_count =
		    sc->mbuf_alloc_failed_sim_count = 0;
		sc->dma_map_addr_rx_failed_count =
		    sc->dma_map_addr_tx_failed_count = 0;
		sc->mbuf_frag_count = 0;
		sc->csum_offload_tcp_udp =
		    sc->csum_offload_ip = 0;
		sc->vlan_tagged_frames_rcvd =
		    sc->vlan_tagged_frames_stripped = 0;

		/* Clear firmware maintained statistics. */
		REG_WR_IND(sc, 0x120084, 0);
	}

	return error;
}


/****************************************************************************/
/* Allows the bootcode state to be dumped through the sysctl interface.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_bc_state(sc);
	}

	return error;
}


/****************************************************************************/
/* Provides a sysctl interface to allow dumping the RX BD chain.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD);
	}

	return error;
}


/****************************************************************************/
/* Provides a sysctl interface to allow dumping the RX MBUF chain.          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
	}

	return error;
}


/****************************************************************************/
/* Provides a sysctl interface to allow dumping the TX chain.               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
	}

	return error;
}


#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the page chain.             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
	}

	return error;
}
#endif

/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in  */
/* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error;
	u32 result;
	u32 val[1];
	u8 *data = (u8 *) val;

	/* The value written to the sysctl node is the NVRAM offset to read. */
	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	bce_nvram_read(sc, result, data, 4);
	BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));

	return (error);
}


/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary registers in the  */
/* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error;
	u32 val, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	/* Make sure the register is accessible. */
	/* Low offsets are direct PCI reads; higher ones go indirect. */
	if (result < 0x8000) {
		val = REG_RD(sc, result);
		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
	} else if (result < 0x0280000) {
		val = REG_RD_IND(sc, result);
		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
	}

	return (error);
}


/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
/* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc;
	device_t dev;
	int error, result;
	u16 val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	/* Make sure the register is accessible. */
	/* Standard MII register space is 0x00-0x1F. */
	if (result < 0x20) {
		sc = (struct bce_softc *)arg1;
		dev = sc->bce_dev;
		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
	}
	return (error);
}


/* Dumps the entire NVRAM contents to the sysctl caller (debug only). */
static int
sysctl_nvram_dump(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error, i;

	if (sc->nvram_buf == NULL) {
		sc->nvram_buf = malloc(sc->bce_flash_size,
		    M_TEMP, M_ZERO | M_WAITOK);
	}
	/* NOTE(review): with M_WAITOK malloc(9) does not return NULL,
	 * so this check looks unreachable — confirm before removing. */
	if (sc->nvram_buf == NULL) {
		return(ENOMEM);
	}
	if (req->oldlen == sc->bce_flash_size) {
		/* Only read the flash when the caller sized its buffer
		 * exactly; otherwise return whatever is cached. */
		for (i = 0; i < sc->bce_flash_size; i++) {
			bce_nvram_read(sc, i, &sc->nvram_buf[i], 1);
		}
	}

	error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size);

	return error;
}

#ifdef BCE_NVRAM_WRITE_SUPPORT
/* Writes a full NVRAM image supplied by the sysctl caller (debug only). */
static int
sysctl_nvram_write(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error;

	if (sc->nvram_buf == NULL) {
		sc->nvram_buf = malloc(sc->bce_flash_size,
		    M_TEMP, M_ZERO | M_WAITOK);
	}
	/* NOTE(review): unreachable with M_WAITOK — see sysctl_nvram_dump. */
	if (sc->nvram_buf == NULL) {
		return(ENOMEM);
	}
	bzero(sc->nvram_buf, sc->bce_flash_size);
	error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size);

	/* Only flash the device when a complete image was supplied. */
	if (req->newlen == sc->bce_flash_size) {
		bce_nvram_write(sc, 0, sc->nvram_buf , sc->bce_flash_size);
	}


	return error;
}
#endif
/****************************************************************************/ 8406 /* Provides a sysctl interface to allow reading a CID. */ 8407 /* */ 8408 /* Returns: */ 8409 /* 0 for success, positive value for failure. */ 8410 /****************************************************************************/ 8411 static int 8412 bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS) 8413 { 8414 struct bce_softc *sc; 8415 int error, result; 8416 8417 result = -1; 8418 error = sysctl_handle_int(oidp, &result, 0, req); 8419 if (error || (req->newptr == NULL)) 8420 return (error); 8421 8422 /* Make sure the register is accessible. */ 8423 if (result <= TX_CID) { 8424 sc = (struct bce_softc *)arg1; 8425 bce_dump_ctx(sc, result); 8426 } 8427 8428 return (error); 8429 } 8430 8431 8432 /****************************************************************************/ 8433 /* Provides a sysctl interface to forcing the driver to dump state and */ 8434 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 8435 /* */ 8436 /* Returns: */ 8437 /* 0 for success, positive value for failure. */ 8438 /****************************************************************************/ 8439 static int 8440 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 8441 { 8442 int error; 8443 int result; 8444 struct bce_softc *sc; 8445 8446 result = -1; 8447 error = sysctl_handle_int(oidp, &result, 0, req); 8448 8449 if (error || !req->newptr) 8450 return (error); 8451 8452 if (result == 1) { 8453 sc = (struct bce_softc *)arg1; 8454 bce_breakpoint(sc); 8455 } 8456 8457 return error; 8458 } 8459 #endif 8460 8461 8462 /****************************************************************************/ 8463 /* Adds any sysctl parameters for tuning or debugging purposes. */ 8464 /* */ 8465 /* Returns: */ 8466 /* 0 for success, positive value for failure. 
 */
/****************************************************************************/
static void
bce_add_sysctls(struct bce_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	DBENTER(BCE_VERBOSE_MISC);

	/* All nodes hang off this device's sysctl tree (dev.bce.N). */
	ctx = device_get_sysctl_ctx(sc->bce_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));

	/* --- Driver error/failure-simulation counters. ------------------ */
#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_sim_control",
	    CTLFLAG_RW, &l2fhdr_error_sim_control,
	    0, "Debug control to force l2fhdr errors");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_sim_count",
	    CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
	    0, "Number of simulated l2_fhdr errors");
#endif

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_count",
	    CTLFLAG_RD, &sc->l2fhdr_error_count,
	    0, "Number of l2_fhdr errors");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_sim_control",
	    CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
	    0, "Debug control to force mbuf allocation failures");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_sim_count",
	    CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
	    0, "Number of simulated mbuf cluster allocation failures");
#endif

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_count",
	    CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
	    0, "Number of mbuf allocation failures");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "mbuf_frag_count",
	    CTLFLAG_RD, &sc->mbuf_frag_count,
	    0, "Number of fragmented mbufs");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "dma_map_addr_failed_sim_control",
	    CTLFLAG_RW, &dma_map_addr_failed_sim_control,
	    0, "Debug control to force DMA mapping failures");

	/* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "dma_map_addr_failed_sim_count",
	    CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
	    0, "Number of simulated DMA mapping failures");

#endif

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "dma_map_addr_rx_failed_count",
	    CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
	    0, "Number of RX DMA mapping failures");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "dma_map_addr_tx_failed_count",
	    CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
	    0, "Number of TX DMA mapping failures");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "unexpected_attention_sim_control",
	    CTLFLAG_RW, &unexpected_attention_sim_control,
	    0, "Debug control to simulate unexpected attentions");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "unexpected_attention_sim_count",
	    CTLFLAG_RW, &sc->unexpected_attention_sim_count,
	    0, "Number of simulated unexpected attentions");
#endif

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "unexpected_attention_count",
	    CTLFLAG_RW, &sc->unexpected_attention_count,
	    0, "Number of unexpected attentions");

	/* --- Debug-only driver watermarks and offload counters. --------- */
#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "debug_bootcode_running_failure",
	    CTLFLAG_RW, &bootcode_running_failure_sim_control,
	    0, "Debug control to force bootcode running failures");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "rx_low_watermark",
	    CTLFLAG_RD, &sc->rx_low_watermark,
	    0, "Lowest level of free rx_bd's");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "rx_empty_count",
	    CTLFLAG_RD, &sc->rx_empty_count,
	    0, "Number of times the RX chain was empty");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "tx_hi_watermark",
	    CTLFLAG_RD, &sc->tx_hi_watermark,
	    0, "Highest level of used tx_bd's");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "tx_full_count",
	    CTLFLAG_RD, &sc->tx_full_count,
	    0, "Number of times the TX chain was full");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "tso_frames_requested",
	    CTLFLAG_RD, &sc->tso_frames_requested,
	    0, "Number of TSO frames requested");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "tso_frames_completed",
	    CTLFLAG_RD, &sc->tso_frames_completed,
	    0, "Number of TSO frames completed");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "tso_frames_failed",
	    CTLFLAG_RD, &sc->tso_frames_failed,
	    0, "Number of TSO frames failed");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "csum_offload_ip",
	    CTLFLAG_RD, &sc->csum_offload_ip,
	    0, "Number of IP checksum offload frames");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "csum_offload_tcp_udp",
	    CTLFLAG_RD, &sc->csum_offload_tcp_udp,
	    0, "Number of TCP/UDP checksum offload frames");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "vlan_tagged_frames_rcvd",
	    CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd,
	    0, "Number of VLAN tagged frames received");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "vlan_tagged_frames_stripped",
	    CTLFLAG_RD, &sc->vlan_tagged_frames_stripped,
	    0, "Number of VLAN tagged frames stripped");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "interrupts_rx",
	    CTLFLAG_RD, &sc->interrupts_rx,
	    0, "Number of RX interrupts");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "interrupts_tx",
	    CTLFLAG_RD, &sc->interrupts_tx,
	    0, "Number of TX interrupts");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    (void *)sc, 0,
	    sysctl_nvram_dump, "S", "");

#ifdef BCE_NVRAM_WRITE_SUPPORT
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR,
	    (void *)sc, 0,
	    sysctl_nvram_write, "S", "");
#endif
#endif

	/* --- Hardware statistics block counters. ------------------------ */
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHcInOctets",
	    CTLFLAG_RD, &sc->stat_IfHCInOctets,
	    "Bytes received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCInBadOctets",
	    CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
	    "Bad bytes received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCOutOctets",
	    CTLFLAG_RD, &sc->stat_IfHCOutOctets,
	    "Bytes sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCOutBadOctets",
	    CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
	    "Bad bytes sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCInUcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
	    "Unicast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCInMulticastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
	    "Multicast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCInBroadcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
	    "Broadcast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCOutUcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
	    "Unicast packets sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCOutMulticastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
	    "Multicast packets sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
	    "stat_IfHCOutBroadcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
	    "Broadcast packets sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
	    CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
	    0, "Internal MAC transmit errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsCarrierSenseErrors",
	    CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
	    0, "Carrier sense errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsFCSErrors",
	    CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
	    0, "Frame check sequence errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsAlignmentErrors",
	    CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
	    0, "Alignment errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsSingleCollisionFrames",
	    CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
	    0, "Single Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsMultipleCollisionFrames",
	    CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
	    0, "Multiple Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsDeferredTransmissions",
	    CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
	    0, "Deferred Transmissions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsExcessiveCollisions",
	    CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
	    0, "Excessive Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsLateCollisions",
	    CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
	    0, "Late Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsCollisions",
	    CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
	    0, "Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsFragments",
	    CTLFLAG_RD, &sc->stat_EtherStatsFragments,
	    0, "Fragments");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsJabbers",
	    CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
	    0, "Jabbers");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsUndersizePkts",
	    CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
	    0, "Undersize packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsOversizePkts",
	    CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts,
	    0, "stat_EtherStatsOversizePkts");

	/* --- RX/TX size-bucket histograms. ------------------------------ */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx64Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
	    0, "Bytes received in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx65Octetsto127Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
	    0, "Bytes received in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx128Octetsto255Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
	    0, "Bytes received in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx256Octetsto511Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
	    0, "Bytes received in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx512Octetsto1023Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
	    0, "Bytes received in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx1024Octetsto1522Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
	    0, "Bytes received in 1024 t0 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx1523Octetsto9022Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
	    0, "Bytes received in 1523 to 9022 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx64Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
	    0, "Bytes sent in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx65Octetsto127Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
	    0, "Bytes sent in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx128Octetsto255Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
	    0, "Bytes sent in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx256Octetsto511Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
	    0, "Bytes sent in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx512Octetsto1023Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
	    0, "Bytes sent in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx1024Octetsto1522Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
	    0, "Bytes sent in 1024 to 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx1523Octetsto9022Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
	    0, "Bytes sent in 1523 to 9022 byte packets");

	/* --- Flow control and firmware discard counters. ---------------- */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_XonPauseFramesReceived",
	    CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
	    0, "XON pause frames receved");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_XoffPauseFramesReceived",
	    CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
	    0, "XOFF pause frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_OutXonSent",
	    CTLFLAG_RD, &sc->stat_OutXonSent,
	    0, "XON pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_OutXoffSent",
	    CTLFLAG_RD, &sc->stat_OutXoffSent,
	    0, "XOFF pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_FlowControlDone",
	    CTLFLAG_RD, &sc->stat_FlowControlDone,
	    0, "Flow control done");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_MacControlFramesReceived",
	    CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
	    0, "MAC control frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_XoffStateEntered",
	    CTLFLAG_RD, &sc->stat_XoffStateEntered,
	    0, "XOFF state entered");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInFramesL2FilterDiscards",
	    CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
	    0, "Received L2 packets discarded");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInRuleCheckerDiscards",
	    CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
	    0, "Received packets discarded by rule");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInFTQDiscards",
	    CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
	    0, "Received packet FTQ discards");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInMBUFDiscards",
	    CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
	    0, "Received packets discarded due to lack "
	    "of controller buffer memory");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInRuleCheckerP4Hit",
	    CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
	    0, "Received packets rule checker hits");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInRuleCheckerDiscards",
	    CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
	    0, "Received packets discarded in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInFTQDiscards",
	    CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
	    0, "Received packets discarded in FTQ in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInMBUFDiscards",
	    CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
	    0, "Received packets discarded in controller "
	    "buffer memory in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInRuleCheckerP4Hit",
	    CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
	    0, "Received packets rule checker hits in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "com_no_buffers",
	    CTLFLAG_RD, &sc->com_no_buffers,
	    0, "Valid packets received but no RX buffers available");

	/* --- Debug-only procedural nodes (state dumps, raw access). ----- */
#ifdef BCE_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "driver_state", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_driver_state, "I", "Drive state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "hw_state", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_hw_state, "I", "Hardware state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "status_block", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_status_block, "I", "Dump status block");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "stats_block", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_stats_block, "I", "Dump statistics block");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "stats_clear", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_stats_clear, "I", "Clear statistics block");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "bc_state", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_bc_state, "I", "Bootcode state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");

#ifdef BCE_JUMBO_HDRSPLIT
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_dump_pg_chain, "I", "Dump page chain");
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_dump_ctx, "I", "Dump context memory");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_breakpoint, "I", "Driver breakpoint");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "reg_read", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_reg_read, "I", "Register read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "nvram_read", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_nvram_read, "I", "NVRAM read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "phy_read", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)sc, 0,
	    bce_sysctl_phy_read, "I", "PHY register read");

#endif

	DBEXIT(BCE_VERBOSE_MISC);
}


/****************************************************************************/
/*                          BCE Debug Routines                              */
/****************************************************************************/
#ifdef BCE_DEBUG

/****************************************************************************/
/* Freezes the controller to allow for a cohesive state dump.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_freeze_controller(struct bce_softc *sc)
{
	u32 val;

	/* Set the DISABLE_ALL bit in the MISC command register. */
	val = REG_RD(sc, BCE_MISC_COMMAND);
	val |= BCE_MISC_COMMAND_DISABLE_ALL;
	REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Unfreezes the controller after a freeze operation.  This may not always  */
/* work and the controller will require a reset!                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_unfreeze_controller(struct bce_softc *sc)
{
	u32 val;

	/* Set the ENABLE_ALL bit in the MISC command register. */
	val = REG_RD(sc, BCE_MISC_COMMAND);
	val |= BCE_MISC_COMMAND_ENABLE_ALL;
	REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Prints out Ethernet frame information from an mbuf.                      */
/*                                                                          */
/* Partially decode an Ethernet frame to look at some important headers.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
{
	struct ether_vlan_header *eh;
	u16 etype;
	int ehlen;
	struct ip *ip;
	struct tcphdr *th;
	struct udphdr *uh;
	struct arphdr *ah;

	/* NOTE(review): assumes the Ethernet, IP and L4 headers are all
	 * contiguous in the first mbuf — no m_pullup() is done here. */
	BCE_PRINTF(
	    "-----------------------------"
	    " Frame Decode "
	    "-----------------------------\n");

	eh = mtod(m, struct ether_vlan_header *);

	/* Handle VLAN encapsulation if present. */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehlen = ETHER_HDR_LEN;
	}

	/* ToDo: Add VLAN output. */
	BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
	    eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehlen);
		BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, "
		    "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n",
		    ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
		    ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));

		/* Decode the L4 header for TCP/UDP; name ICMP only. */
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			/* ip_hl is in 32-bit words, hence the << 2. */
			th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
			BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = "
			    "%d bytes, flags = 0x%b, csum = 0x%04X\n",
			    ntohs(th->th_dport), ntohs(th->th_sport),
			    (th->th_off << 2), th->th_flags,
			    "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST"
			    "\02SYN\01FIN", ntohs(th->th_sum));
			break;
		case IPPROTO_UDP:
			uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
			BCE_PRINTF("-udp: dest = %d, src = %d, len = %d "
			    "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport),
			    ntohs(uh->uh_sport), ntohs(uh->uh_ulen),
			    ntohs(uh->uh_sum));
			break;
		case IPPROTO_ICMP:
			BCE_PRINTF("icmp:\n");
			break;
		default:
			BCE_PRINTF("----: Other IP protocol.\n");
		}
		break;
	case ETHERTYPE_IPV6:
		BCE_PRINTF("ipv6: No decode supported.\n");
		break;
	case ETHERTYPE_ARP:
		BCE_PRINTF("-arp: ");
		ah = (struct arphdr *) (m->m_data + ehlen);
		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
			printf("reverse ARP request\n");
			break;
		case ARPOP_REVREPLY:
			printf("reverse ARP reply\n");
			break;
		case ARPOP_REQUEST:
			printf("ARP request\n");
			break;
		case ARPOP_REPLY:
			printf("ARP reply\n");
			break;
		default:
			printf("other ARP operation\n");
		}
		break;
	default:
		BCE_PRINTF("----: Other protocol.\n");
	}

	BCE_PRINTF(
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}


/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
	struct mbuf *mp = m;

	if (m == NULL) {
		BCE_PRINTF("mbuf: null pointer\n");
		return;
	}

	/* Walk the whole mbuf chain, printing each link. */
	while (mp) {
		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, "
		    "m_data = %p\n", mp, mp->m_len, mp->m_flags,
		    "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data);

		/* Packet-header mbufs also carry length and csum flags. */
		if (mp->m_flags & M_PKTHDR) {
			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, "
			    "csum_flags = %b\n", mp->m_pkthdr.len,
			    mp->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG"
			    "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
			    "\22M_PROMISC\23M_NOFREE",
			    mp->m_pkthdr.csum_flags,
			    "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
			    "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
			    "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
			    "\14CSUM_PSEUDO_HDR");
		}

		/* External storage: print buffer, size and storage type. */
		if (mp->m_flags & M_EXT) {
			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
			    mp->m_ext.ext_buf, mp->m_ext.ext_size);
			switch (mp->m_ext.ext_type) {
			case EXT_CLUSTER:
				printf("EXT_CLUSTER\n"); break;
			case EXT_SFBUF:
				printf("EXT_SFBUF\n"); break;
			case EXT_JUMBO9:
				printf("EXT_JUMBO9\n"); break;
			case EXT_JUMBO16:
				printf("EXT_JUMBO16\n"); break;
			case EXT_PACKET:
				printf("EXT_PACKET\n"); break;
			case EXT_MBUF:
				printf("EXT_MBUF\n"); break;
			case EXT_NET_DRV:
				printf("EXT_NET_DRV\n"); break;
			case EXT_MOD_TYPE:
				printf("EXT_MDD_TYPE\n"); break;
			case EXT_DISPOSABLE:
				printf("EXT_DISPOSABLE\n"); break;
			case EXT_EXTREF:
				printf("EXT_EXTREF\n"); break;
			default:
				printf("UNKNOWN\n");
			}
		}

		mp = mp->m_next;
	}
}


/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
{
	struct mbuf *m;

	BCE_PRINTF(
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	/* Dump 'count' entries starting at chain_prod, following the
	 * TX chain's wrap-around indexing. */
	for (int i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
		bce_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the mbufs in the RX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 9247 /****************************************************************************/ 9248 static __attribute__ ((noinline)) void 9249 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9250 { 9251 struct mbuf *m; 9252 9253 BCE_PRINTF( 9254 "----------------------------" 9255 " rx mbuf data " 9256 "----------------------------\n"); 9257 9258 for (int i = 0; i < count; i++) { 9259 m = sc->rx_mbuf_ptr[chain_prod]; 9260 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); 9261 bce_dump_mbuf(sc, m); 9262 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 9263 } 9264 9265 9266 BCE_PRINTF( 9267 "----------------------------" 9268 "----------------" 9269 "----------------------------\n"); 9270 } 9271 9272 9273 #ifdef BCE_JUMBO_HDRSPLIT 9274 /****************************************************************************/ 9275 /* Prints out the mbufs in the mbuf page chain. */ 9276 /* */ 9277 /* Returns: */ 9278 /* Nothing. */ 9279 /****************************************************************************/ 9280 static __attribute__ ((noinline)) void 9281 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9282 { 9283 struct mbuf *m; 9284 9285 BCE_PRINTF( 9286 "----------------------------" 9287 " pg mbuf data " 9288 "----------------------------\n"); 9289 9290 for (int i = 0; i < count; i++) { 9291 m = sc->pg_mbuf_ptr[chain_prod]; 9292 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); 9293 bce_dump_mbuf(sc, m); 9294 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); 9295 } 9296 9297 9298 BCE_PRINTF( 9299 "----------------------------" 9300 "----------------" 9301 "----------------------------\n"); 9302 } 9303 #endif 9304 9305 9306 /****************************************************************************/ 9307 /* Prints out a tx_bd structure. */ 9308 /* */ 9309 /* Returns: */ 9310 /* Nothing. 
*/ 9311 /****************************************************************************/ 9312 static __attribute__ ((noinline)) void 9313 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 9314 { 9315 int i = 0; 9316 9317 if (idx > MAX_TX_BD) 9318 /* Index out of range. */ 9319 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 9320 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 9321 /* TX Chain page pointer. */ 9322 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9323 "pointer\n", idx, txbd->tx_bd_haddr_hi, 9324 txbd->tx_bd_haddr_lo); 9325 else { 9326 /* Normal tx_bd entry. */ 9327 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 9328 "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = " 9329 "0x%04X (", idx, txbd->tx_bd_haddr_hi, 9330 txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes, 9331 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); 9332 9333 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) { 9334 if (i>0) 9335 printf("|"); 9336 printf("CONN_FAULT"); 9337 i++; 9338 } 9339 9340 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) { 9341 if (i>0) 9342 printf("|"); 9343 printf("TCP_UDP_CKSUM"); 9344 i++; 9345 } 9346 9347 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) { 9348 if (i>0) 9349 printf("|"); 9350 printf("IP_CKSUM"); 9351 i++; 9352 } 9353 9354 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) { 9355 if (i>0) 9356 printf("|"); 9357 printf("VLAN"); 9358 i++; 9359 } 9360 9361 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) { 9362 if (i>0) 9363 printf("|"); 9364 printf("COAL_NOW"); 9365 i++; 9366 } 9367 9368 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) { 9369 if (i>0) 9370 printf("|"); 9371 printf("DONT_GEN_CRC"); 9372 i++; 9373 } 9374 9375 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) { 9376 if (i>0) 9377 printf("|"); 9378 printf("START"); 9379 i++; 9380 } 9381 9382 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) { 9383 if (i>0) 9384 printf("|"); 9385 printf("END"); 9386 i++; 9387 } 9388 9389 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) 
{ 9390 if (i>0) 9391 printf("|"); 9392 printf("LSO"); 9393 i++; 9394 } 9395 9396 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) { 9397 if (i>0) 9398 printf("|"); 9399 printf("SW_OPTION=%d", ((txbd->tx_bd_flags & 9400 TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++; 9401 } 9402 9403 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) { 9404 if (i>0) 9405 printf("|"); 9406 printf("SW_FLAGS"); 9407 i++; 9408 } 9409 9410 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) { 9411 if (i>0) 9412 printf("|"); 9413 printf("SNAP)"); 9414 } else { 9415 printf(")\n"); 9416 } 9417 } 9418 } 9419 9420 9421 /****************************************************************************/ 9422 /* Prints out a rx_bd structure. */ 9423 /* */ 9424 /* Returns: */ 9425 /* Nothing. */ 9426 /****************************************************************************/ 9427 static __attribute__ ((noinline)) void 9428 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 9429 { 9430 if (idx > MAX_RX_BD) 9431 /* Index out of range. */ 9432 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 9433 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 9434 /* RX Chain page pointer. */ 9435 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9436 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 9437 rxbd->rx_bd_haddr_lo); 9438 else 9439 /* Normal rx_bd entry. */ 9440 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 9441 "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi, 9442 rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len, 9443 rxbd->rx_bd_flags); 9444 } 9445 9446 9447 #ifdef BCE_JUMBO_HDRSPLIT 9448 /****************************************************************************/ 9449 /* Prints out a rx_bd structure in the page chain. */ 9450 /* */ 9451 /* Returns: */ 9452 /* Nothing. 
*/ 9453 /****************************************************************************/ 9454 static __attribute__ ((noinline)) void 9455 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) 9456 { 9457 if (idx > MAX_PG_BD) 9458 /* Index out of range. */ 9459 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); 9460 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) 9461 /* Page Chain page pointer. */ 9462 BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 9463 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); 9464 else 9465 /* Normal rx_bd entry. */ 9466 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 9467 "flags = 0x%08X\n", idx, 9468 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, 9469 pgbd->rx_bd_len, pgbd->rx_bd_flags); 9470 } 9471 #endif 9472 9473 9474 /****************************************************************************/ 9475 /* Prints out a l2_fhdr structure. */ 9476 /* */ 9477 /* Returns: */ 9478 /* Nothing. */ 9479 /****************************************************************************/ 9480 static __attribute__ ((noinline)) void 9481 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 9482 { 9483 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " 9484 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " 9485 "tcp_udp_xsum = 0x%04X\n", idx, 9486 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, 9487 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 9488 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 9489 } 9490 9491 9492 /****************************************************************************/ 9493 /* Prints out context memory info. (Only useful for CID 0 to 16.) */ 9494 /* */ 9495 /* Returns: */ 9496 /* Nothing. 
*/ 9497 /****************************************************************************/ 9498 static __attribute__ ((noinline)) void 9499 bce_dump_ctx(struct bce_softc *sc, u16 cid) 9500 { 9501 if (cid > TX_CID) { 9502 BCE_PRINTF(" Unknown CID\n"); 9503 return; 9504 } 9505 9506 BCE_PRINTF( 9507 "----------------------------" 9508 " CTX Data " 9509 "----------------------------\n"); 9510 9511 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); 9512 9513 if (cid == RX_CID) { 9514 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx " 9515 "producer index\n", 9516 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX)); 9517 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host " 9518 "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 9519 BCE_L2CTX_RX_HOST_BSEQ)); 9520 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n", 9521 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ)); 9522 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer " 9523 "descriptor address\n", 9524 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI)); 9525 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer " 9526 "descriptor address\n", 9527 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO)); 9528 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer " 9529 "index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9530 BCE_L2CTX_RX_NX_BDIDX)); 9531 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page " 9532 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9533 BCE_L2CTX_RX_HOST_PG_BDIDX)); 9534 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page " 9535 "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid), 9536 BCE_L2CTX_RX_PG_BUF_SIZE)); 9537 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page " 9538 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 9539 BCE_L2CTX_RX_NX_PG_BDHADDR_HI)); 9540 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page " 9541 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 9542 BCE_L2CTX_RX_NX_PG_BDHADDR_LO)); 9543 BCE_PRINTF(" 0x%08X - 
(L2CTX_RX_NX_PG_BDIDX) h/w page " 9544 "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9545 BCE_L2CTX_RX_NX_PG_BDIDX)); 9546 } else if (cid == TX_CID) { 9547 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9548 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 9549 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n", 9550 CTX_RD(sc, GET_CID_ADDR(cid), 9551 BCE_L2CTX_TX_TYPE_XI)); 9552 BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx " 9553 "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), 9554 BCE_L2CTX_TX_CMD_TYPE_XI)); 9555 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) " 9556 "h/w buffer descriptor address\n", 9557 CTX_RD(sc, GET_CID_ADDR(cid), 9558 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI)); 9559 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) " 9560 "h/w buffer descriptor address\n", 9561 CTX_RD(sc, GET_CID_ADDR(cid), 9562 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI)); 9563 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) " 9564 "host producer index\n", 9565 CTX_RD(sc, GET_CID_ADDR(cid), 9566 BCE_L2CTX_TX_HOST_BIDX_XI)); 9567 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) " 9568 "host byte sequence\n", 9569 CTX_RD(sc, GET_CID_ADDR(cid), 9570 BCE_L2CTX_TX_HOST_BSEQ_XI)); 9571 } else { 9572 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n", 9573 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE)); 9574 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n", 9575 CTX_RD(sc, GET_CID_ADDR(cid), 9576 BCE_L2CTX_TX_CMD_TYPE)); 9577 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) " 9578 "h/w buffer descriptor address\n", 9579 CTX_RD(sc, GET_CID_ADDR(cid), 9580 BCE_L2CTX_TX_TBDR_BHADDR_HI)); 9581 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) " 9582 "h/w buffer descriptor address\n", 9583 CTX_RD(sc, GET_CID_ADDR(cid), 9584 BCE_L2CTX_TX_TBDR_BHADDR_LO)); 9585 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host " 9586 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9587 BCE_L2CTX_TX_HOST_BIDX)); 9588 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte " 9589 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 9590 
BCE_L2CTX_TX_HOST_BSEQ)); 9591 } 9592 } 9593 9594 BCE_PRINTF( 9595 "----------------------------" 9596 " Raw CTX " 9597 "----------------------------\n"); 9598 9599 for (int i = 0x0; i < 0x300; i += 0x10) { 9600 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 9601 CTX_RD(sc, GET_CID_ADDR(cid), i), 9602 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4), 9603 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8), 9604 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc)); 9605 } 9606 9607 9608 BCE_PRINTF( 9609 "----------------------------" 9610 "----------------" 9611 "----------------------------\n"); 9612 } 9613 9614 9615 /****************************************************************************/ 9616 /* Prints out the FTQ data. */ 9617 /* */ 9618 /* Returns: */ 9619 /* Nothing. */ 9620 /****************************************************************************/ 9621 static __attribute__ ((noinline)) void 9622 bce_dump_ftqs(struct bce_softc *sc) 9623 { 9624 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val; 9625 9626 BCE_PRINTF( 9627 "----------------------------" 9628 " FTQ Data " 9629 "----------------------------\n"); 9630 9631 BCE_PRINTF(" FTQ Command Control Depth_Now " 9632 "Max_Depth Valid_Cnt \n"); 9633 BCE_PRINTF(" ------- ---------- ---------- ---------- " 9634 "---------- ----------\n"); 9635 9636 /* Setup the generic statistic counters for the FTQ valid count. 
*/ 9637 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | 9638 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | 9639 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | 9640 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT); 9641 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 9642 9643 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | 9644 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | 9645 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | 9646 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT); 9647 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val); 9648 9649 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | 9650 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | 9651 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | 9652 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT); 9653 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val); 9654 9655 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | 9656 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | 9657 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | 9658 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT); 9659 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val); 9660 9661 /* Input queue to the Receive Lookup state machine */ 9662 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); 9663 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); 9664 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; 9665 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; 9666 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 9667 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9668 cmd, ctl, cur_depth, max_depth, valid_cnt); 9669 9670 /* Input queue to the Receive Processor */ 9671 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); 9672 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); 9673 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; 9674 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; 9675 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 9676 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9677 
cmd, ctl, cur_depth, max_depth, valid_cnt); 9678 9679 /* Input queue to the Recevie Processor */ 9680 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); 9681 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); 9682 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; 9683 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; 9684 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 9685 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9686 cmd, ctl, cur_depth, max_depth, valid_cnt); 9687 9688 /* Input queue to the Receive Virtual to Physical state machine */ 9689 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); 9690 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); 9691 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; 9692 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; 9693 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 9694 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9695 cmd, ctl, cur_depth, max_depth, valid_cnt); 9696 9697 /* Input queue to the Recevie Virtual to Physical state machine */ 9698 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); 9699 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); 9700 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; 9701 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; 9702 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); 9703 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9704 cmd, ctl, cur_depth, max_depth, valid_cnt); 9705 9706 /* Input queue to the Receive Virtual to Physical state machine */ 9707 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); 9708 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); 9709 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; 9710 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; 9711 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); 9712 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9713 cmd, ctl, cur_depth, max_depth, valid_cnt); 9714 9715 /* Input queue to the Receive DMA state machine */ 9716 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); 9717 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL); 9718 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) 
>> 22; 9719 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; 9720 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); 9721 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9722 cmd, ctl, cur_depth, max_depth, valid_cnt); 9723 9724 /* Input queue to the Transmit Scheduler state machine */ 9725 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); 9726 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); 9727 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; 9728 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; 9729 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7); 9730 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9731 cmd, ctl, cur_depth, max_depth, valid_cnt); 9732 9733 /* Input queue to the Transmit Buffer Descriptor state machine */ 9734 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); 9735 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); 9736 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; 9737 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; 9738 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); 9739 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9740 cmd, ctl, cur_depth, max_depth, valid_cnt); 9741 9742 /* Input queue to the Transmit Processor */ 9743 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); 9744 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); 9745 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; 9746 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; 9747 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); 9748 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9749 cmd, ctl, cur_depth, max_depth, valid_cnt); 9750 9751 /* Input queue to the Transmit DMA state machine */ 9752 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); 9753 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); 9754 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; 9755 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; 9756 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); 9757 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9758 cmd, ctl, cur_depth, max_depth, valid_cnt); 9759 9760 /* Input queue to the Transmit Patch-Up 
Processor */ 9761 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); 9762 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); 9763 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; 9764 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; 9765 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); 9766 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9767 cmd, ctl, cur_depth, max_depth, valid_cnt); 9768 9769 /* Input queue to the Transmit Assembler state machine */ 9770 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); 9771 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); 9772 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; 9773 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; 9774 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); 9775 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9776 cmd, ctl, cur_depth, max_depth, valid_cnt); 9777 9778 /* Input queue to the Completion Processor */ 9779 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); 9780 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); 9781 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; 9782 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; 9783 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); 9784 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9785 cmd, ctl, cur_depth, max_depth, valid_cnt); 9786 9787 /* Input queue to the Completion Processor */ 9788 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); 9789 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); 9790 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; 9791 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; 9792 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); 9793 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9794 cmd, ctl, cur_depth, max_depth, valid_cnt); 9795 9796 /* Input queue to the Completion Processor */ 9797 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); 9798 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); 9799 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; 9800 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; 
9801 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); 9802 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9803 cmd, ctl, cur_depth, max_depth, valid_cnt); 9804 9805 /* Setup the generic statistic counters for the FTQ valid count. */ 9806 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | 9807 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | 9808 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT); 9809 9810 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9811 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 9812 val = val | 9813 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 9814 24); 9815 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 9816 9817 /* Input queue to the Management Control Processor */ 9818 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); 9819 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); 9820 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; 9821 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; 9822 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 9823 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9824 cmd, ctl, cur_depth, max_depth, valid_cnt); 9825 9826 /* Input queue to the Command Processor */ 9827 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); 9828 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); 9829 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; 9830 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; 9831 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 9832 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9833 cmd, ctl, cur_depth, max_depth, valid_cnt); 9834 9835 /* Input queue to the Completion Scheduler state machine */ 9836 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); 9837 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); 9838 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; 9839 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; 9840 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 9841 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9842 cmd, ctl, cur_depth, max_depth, valid_cnt); 9843 9844 if 
((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9845 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 9846 /* Input queue to the RV2P Command Scheduler */ 9847 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD); 9848 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL); 9849 cur_depth = (ctl & 0xFFC00000) >> 22; 9850 max_depth = (ctl & 0x003FF000) >> 12; 9851 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 9852 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9853 cmd, ctl, cur_depth, max_depth, valid_cnt); 9854 } 9855 9856 BCE_PRINTF( 9857 "----------------------------" 9858 "----------------" 9859 "----------------------------\n"); 9860 } 9861 9862 9863 /****************************************************************************/ 9864 /* Prints out the TX chain. */ 9865 /* */ 9866 /* Returns: */ 9867 /* Nothing. */ 9868 /****************************************************************************/ 9869 static __attribute__ ((noinline)) void 9870 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) 9871 { 9872 struct tx_bd *txbd; 9873 9874 /* First some info about the tx_bd chain structure. */ 9875 BCE_PRINTF( 9876 "----------------------------" 9877 " tx_bd chain " 9878 "----------------------------\n"); 9879 9880 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", 9881 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES); 9882 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 9883 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); 9884 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD); 9885 9886 BCE_PRINTF( 9887 "----------------------------" 9888 " tx_bd data " 9889 "----------------------------\n"); 9890 9891 /* Now print out a decoded list of TX buffer descriptors. 
*/ 9892 for (int i = 0; i < count; i++) { 9893 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 9894 bce_dump_txbd(sc, tx_prod, txbd); 9895 tx_prod++; 9896 } 9897 9898 BCE_PRINTF( 9899 "----------------------------" 9900 "----------------" 9901 "----------------------------\n"); 9902 } 9903 9904 9905 /****************************************************************************/ 9906 /* Prints out the RX chain. */ 9907 /* */ 9908 /* Returns: */ 9909 /* Nothing. */ 9910 /****************************************************************************/ 9911 static __attribute__ ((noinline)) void 9912 bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count) 9913 { 9914 struct rx_bd *rxbd; 9915 9916 /* First some info about the rx_bd chain structure. */ 9917 BCE_PRINTF( 9918 "----------------------------" 9919 " rx_bd chain " 9920 "----------------------------\n"); 9921 9922 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", 9923 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES); 9924 9925 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 9926 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); 9927 9928 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD); 9929 9930 BCE_PRINTF( 9931 "----------------------------" 9932 " rx_bd data " 9933 "----------------------------\n"); 9934 9935 /* Now print out the rx_bd's themselves. */ 9936 for (int i = 0; i < count; i++) { 9937 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 9938 bce_dump_rxbd(sc, rx_prod, rxbd); 9939 rx_prod = RX_CHAIN_IDX(rx_prod + 1); 9940 } 9941 9942 BCE_PRINTF( 9943 "----------------------------" 9944 "----------------" 9945 "----------------------------\n"); 9946 } 9947 9948 9949 #ifdef BCE_JUMBO_HDRSPLIT 9950 /****************************************************************************/ 9951 /* Prints out the page chain. */ 9952 /* */ 9953 /* Returns: */ 9954 /* Nothing. 
*/ 9955 /****************************************************************************/ 9956 static __attribute__ ((noinline)) void 9957 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) 9958 { 9959 struct rx_bd *pgbd; 9960 9961 /* First some info about the page chain structure. */ 9962 BCE_PRINTF( 9963 "----------------------------" 9964 " page chain " 9965 "----------------------------\n"); 9966 9967 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", 9968 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES); 9969 9970 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 9971 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); 9972 9973 BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n", 9974 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD); 9975 9976 BCE_PRINTF( 9977 "----------------------------" 9978 " page data " 9979 "----------------------------\n"); 9980 9981 /* Now print out the rx_bd's themselves. */ 9982 for (int i = 0; i < count; i++) { 9983 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; 9984 bce_dump_pgbd(sc, pg_prod, pgbd); 9985 pg_prod = PG_CHAIN_IDX(pg_prod + 1); 9986 } 9987 9988 BCE_PRINTF( 9989 "----------------------------" 9990 "----------------" 9991 "----------------------------\n"); 9992 } 9993 #endif 9994 9995 9996 #define BCE_PRINT_RX_CONS(arg) \ 9997 if (sblk->status_rx_quick_consumer_index##arg) \ 9998 BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \ 9999 sblk->status_rx_quick_consumer_index##arg, (u16) \ 10000 RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \ 10001 arg); 10002 10003 10004 #define BCE_PRINT_TX_CONS(arg) \ 10005 if (sblk->status_tx_quick_consumer_index##arg) \ 10006 BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \ 10007 sblk->status_tx_quick_consumer_index##arg, (u16) \ 10008 TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \ 10009 arg); 10010 10011 /****************************************************************************/ 10012 /* Prints out 
the status block from host memory. */ 10013 /* */ 10014 /* Returns: */ 10015 /* Nothing. */ 10016 /****************************************************************************/ 10017 static __attribute__ ((noinline)) void 10018 bce_dump_status_block(struct bce_softc *sc) 10019 { 10020 struct status_block *sblk; 10021 10022 sblk = sc->status_block; 10023 10024 BCE_PRINTF( 10025 "----------------------------" 10026 " Status Block " 10027 "----------------------------\n"); 10028 10029 /* Theses indices are used for normal L2 drivers. */ 10030 BCE_PRINTF(" 0x%08X - attn_bits\n", 10031 sblk->status_attn_bits); 10032 10033 BCE_PRINTF(" 0x%08X - attn_bits_ack\n", 10034 sblk->status_attn_bits_ack); 10035 10036 BCE_PRINT_RX_CONS(0); 10037 BCE_PRINT_TX_CONS(0) 10038 10039 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); 10040 10041 /* Theses indices are not used for normal L2 drivers. */ 10042 BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3); 10043 BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6); 10044 BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9); 10045 BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12); 10046 BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15); 10047 10048 BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3); 10049 10050 if (sblk->status_completion_producer_index || 10051 sblk->status_cmd_consumer_index) 10052 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", 10053 sblk->status_completion_producer_index, 10054 sblk->status_cmd_consumer_index); 10055 10056 BCE_PRINTF( 10057 "----------------------------" 10058 "----------------" 10059 "----------------------------\n"); 10060 } 10061 10062 10063 #define BCE_PRINT_64BIT_STAT(arg) \ 10064 if (sblk->arg##_lo || sblk->arg##_hi) \ 10065 BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \ 10066 sblk->arg##_lo, #arg); 10067 10068 #define BCE_PRINT_32BIT_STAT(arg) \ 10069 if (sblk->arg) \ 10070 BCE_PRINTF(" 
0x%08X : %s\n", \ 10071 sblk->arg, #arg); 10072 10073 /****************************************************************************/ 10074 /* Prints out the statistics block from host memory. */ 10075 /* */ 10076 /* Returns: */ 10077 /* Nothing. */ 10078 /****************************************************************************/ 10079 static __attribute__ ((noinline)) void 10080 bce_dump_stats_block(struct bce_softc *sc) 10081 { 10082 struct statistics_block *sblk; 10083 10084 sblk = sc->stats_block; 10085 10086 BCE_PRINTF( 10087 "---------------" 10088 " Stats Block (All Stats Not Shown Are 0) " 10089 "---------------\n"); 10090 10091 BCE_PRINT_64BIT_STAT(stat_IfHCInOctets); 10092 BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets); 10093 BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets); 10094 BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets); 10095 BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts); 10096 BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts); 10097 BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts); 10098 BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts); 10099 BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts); 10100 BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts); 10101 BCE_PRINT_32BIT_STAT( 10102 stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 10103 BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors); 10104 BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors); 10105 BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors); 10106 BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames); 10107 BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames); 10108 BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions); 10109 BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions); 10110 BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions); 10111 BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions); 10112 BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments); 10113 BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers); 10114 BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts); 10115 
BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts); 10116 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets); 10117 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets); 10118 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets); 10119 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets); 10120 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets); 10121 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets); 10122 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets); 10123 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets); 10124 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets); 10125 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets); 10126 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets); 10127 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets); 10128 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets); 10129 BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets); 10130 BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived); 10131 BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived); 10132 BCE_PRINT_32BIT_STAT(stat_OutXonSent); 10133 BCE_PRINT_32BIT_STAT(stat_OutXoffSent); 10134 BCE_PRINT_32BIT_STAT(stat_FlowControlDone); 10135 BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived); 10136 BCE_PRINT_32BIT_STAT(stat_XoffStateEntered); 10137 BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards); 10138 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards); 10139 BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards); 10140 BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards); 10141 BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit); 10142 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards); 10143 BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards); 10144 BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards); 10145 BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit); 10146 10147 BCE_PRINTF( 10148 "----------------------------" 10149 "----------------" 10150 
"----------------------------\n"); 10151 } 10152 10153 10154 /****************************************************************************/ 10155 /* Prints out a summary of the driver state. */ 10156 /* */ 10157 /* Returns: */ 10158 /* Nothing. */ 10159 /****************************************************************************/ 10160 static __attribute__ ((noinline)) void 10161 bce_dump_driver_state(struct bce_softc *sc) 10162 { 10163 u32 val_hi, val_lo; 10164 10165 BCE_PRINTF( 10166 "-----------------------------" 10167 " Driver State " 10168 "-----------------------------\n"); 10169 10170 val_hi = BCE_ADDR_HI(sc); 10171 val_lo = BCE_ADDR_LO(sc); 10172 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual " 10173 "address\n", val_hi, val_lo); 10174 10175 val_hi = BCE_ADDR_HI(sc->bce_vhandle); 10176 val_lo = BCE_ADDR_LO(sc->bce_vhandle); 10177 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual " 10178 "address\n", val_hi, val_lo); 10179 10180 val_hi = BCE_ADDR_HI(sc->status_block); 10181 val_lo = BCE_ADDR_LO(sc->status_block); 10182 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block " 10183 "virtual address\n", val_hi, val_lo); 10184 10185 val_hi = BCE_ADDR_HI(sc->stats_block); 10186 val_lo = BCE_ADDR_LO(sc->stats_block); 10187 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block " 10188 "virtual address\n", val_hi, val_lo); 10189 10190 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 10191 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 10192 BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 10193 "virtual adddress\n", val_hi, val_lo); 10194 10195 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 10196 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 10197 BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 10198 "virtual address\n", val_hi, val_lo); 10199 10200 #ifdef BCE_JUMBO_HDRSPLIT 10201 val_hi = BCE_ADDR_HI(sc->pg_bd_chain); 10202 val_lo = BCE_ADDR_LO(sc->pg_bd_chain); 10203 BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain " 10204 "virtual 
address\n", val_hi, val_lo); 10205 #endif 10206 10207 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 10208 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 10209 BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 10210 "virtual address\n", val_hi, val_lo); 10211 10212 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 10213 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 10214 BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 10215 "virtual address\n", val_hi, val_lo); 10216 10217 #ifdef BCE_JUMBO_HDRSPLIT 10218 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); 10219 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); 10220 BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain " 10221 "virtual address\n", val_hi, val_lo); 10222 #endif 10223 10224 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) " 10225 "h/w intrs\n", sc->interrupts_generated); 10226 10227 BCE_PRINTF(" 0x%08X - (sc->interrupts_rx) " 10228 "rx interrupts handled\n", sc->interrupts_rx); 10229 10230 BCE_PRINTF(" 0x%08X - (sc->interrupts_tx) " 10231 "tx interrupts handled\n", sc->interrupts_tx); 10232 10233 BCE_PRINTF(" 0x%08X - (sc->phy_interrupts) " 10234 "phy interrupts handled\n", sc->phy_interrupts); 10235 10236 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) " 10237 "status block index\n", sc->last_status_idx); 10238 10239 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer " 10240 "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); 10241 10242 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer " 10243 "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); 10244 10245 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer " 10246 "byte seq index\n", sc->tx_prod_bseq); 10247 10248 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx " 10249 "mbufs allocated\n", sc->debug_tx_mbuf_alloc); 10250 10251 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used " 10252 "tx_bd's\n", sc->used_tx_bd); 10253 10254 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi " 10255 "watermark\n", sc->tx_hi_watermark, sc->max_tx_bd); 10256 10257 
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer " 10258 "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); 10259 10260 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer " 10261 "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); 10262 10263 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer " 10264 "byte seq index\n", sc->rx_prod_bseq); 10265 10266 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx " 10267 "mbufs allocated\n", sc->debug_rx_mbuf_alloc); 10268 10269 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free " 10270 "rx_bd's\n", sc->free_rx_bd); 10271 10272 #ifdef BCE_JUMBO_HDRSPLIT 10273 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer " 10274 "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); 10275 10276 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer " 10277 "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); 10278 10279 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page " 10280 "mbufs allocated\n", sc->debug_pg_mbuf_alloc); 10281 10282 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page " 10283 "rx_bd's\n", sc->free_pg_bd); 10284 10285 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low " 10286 "watermark\n", sc->pg_low_watermark, sc->max_pg_bd); 10287 #endif 10288 10289 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) " 10290 "mbuf alloc failures\n", sc->mbuf_alloc_failed_count); 10291 10292 BCE_PRINTF(" 0x%08X - (sc->bce_flags) " 10293 "bce mac flags\n", sc->bce_flags); 10294 10295 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) " 10296 "bce phy flags\n", sc->bce_phy_flags); 10297 10298 BCE_PRINTF( 10299 "----------------------------" 10300 "----------------" 10301 "----------------------------\n"); 10302 } 10303 10304 10305 /****************************************************************************/ 10306 /* Prints out the hardware state through a summary of important register, */ 10307 /* followed by a complete register dump. */ 10308 /* */ 10309 /* Returns: */ 10310 /* Nothing. 
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_hw_state(struct bce_softc *sc)
{
	/*
	 * Summary registers, printed in declaration order.  Entries with
	 * "indirect" set live behind the register window and must be read
	 * with REG_RD_IND() instead of REG_RD().
	 */
	static const struct {
		u32 addr;
		const char *name;
		int indirect;
	} hw_regs[] = {
		{ BCE_MISC_ENABLE_STATUS_BITS, "misc_enable_status_bits", 0 },
		{ BCE_DMA_STATUS,	"dma_status",	0 },
		{ BCE_CTX_STATUS,	"ctx_status",	0 },
		{ BCE_EMAC_STATUS,	"emac_status",	0 },
		{ BCE_RPM_STATUS,	"rpm_status",	0 },
		/* ToDo: Create a #define for this constant. */
		{ 0x2004,		"rlup_status",	0 },
		{ BCE_RV2P_STATUS,	"rv2p_status",	0 },
		/* ToDo: Create a #define for this constant. */
		{ 0x2c04,		"rdma_status",	0 },
		{ BCE_TBDR_STATUS,	"tbdr_status",	0 },
		{ BCE_TDMA_STATUS,	"tdma_status",	0 },
		{ BCE_HC_STATUS,	"hc_status",	0 },
		{ BCE_TXP_CPU_STATE,	"txp_cpu_state",  1 },
		{ BCE_TPAT_CPU_STATE,	"tpat_cpu_state", 1 },
		{ BCE_RXP_CPU_STATE,	"rxp_cpu_state",  1 },
		{ BCE_COM_CPU_STATE,	"com_cpu_state",  1 },
		{ BCE_MCP_CPU_STATE,	"mcp_cpu_state",  1 },
		{ BCE_CP_CPU_STATE,	"cp_cpu_state",   1 },
	};

	BCE_PRINTF(
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);

	for (int i = 0; i < (int)(sizeof(hw_regs) / sizeof(hw_regs[0])); i++) {
		u32 val;

		val = hw_regs[i].indirect ?
		    REG_RD_IND(sc, hw_regs[i].addr) :
		    REG_RD(sc, hw_regs[i].addr);
		BCE_PRINTF("0x%08X - (0x%06X) %s\n", val, hw_regs[i].addr,
		    hw_regs[i].name);
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	BCE_PRINTF(
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	/* Dump the directly addressable register space, 16 bytes per row. */
	for (int addr = 0x400; addr < 0x8000; addr += 0x10) {
		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    addr, REG_RD(sc, addr), REG_RD(sc, addr + 0x4),
		    REG_RD(sc, addr + 0x8), REG_RD(sc, addr + 0xC));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

/****************************************************************************/
/* Prints out the mailbox queue registers.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_mq_regs(struct bce_softc *sc)
{
	int addr;

	BCE_PRINTF(
	    "----------------------------"
	    " MQ Regs "
	    "----------------------------\n");

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	/* Dump the mailbox queue register space, 16 bytes per row. */
	for (addr = 0x3c00; addr < 0x4000; addr += 0x10)
		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    addr, REG_RD(sc, addr), REG_RD(sc, addr + 0x4),
		    REG_RD(sc, addr + 0x8), REG_RD(sc, addr + 0xC));

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the bootcode state.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_bc_state(struct bce_softc *sc)
{
	/* Shared-memory words describing the bootcode state, in print order. */
	static const struct {
		u32 offset;
		const char *name;
	} bc_fields[] = {
		{ BCE_BC_RESET_TYPE,		"reset_type" },
		{ BCE_BC_STATE,			"state" },
		{ BCE_BC_STATE_CONDITION,	"condition" },
		{ BCE_BC_STATE_DEBUG_CMD,	"debug_cmd" },
	};

	BCE_PRINTF(
	    "----------------------------"
	    " Bootcode State "
	    "----------------------------\n");

	BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);

	for (int i = 0; i < (int)(sizeof(bc_fields) / sizeof(bc_fields[0]));
	    i++)
		BCE_PRINTF("0x%08X - (0x%06X) %s\n",
		    bce_shmem_rd(sc, bc_fields[i].offset),
		    bc_fields[i].offset, bc_fields[i].name);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the TXP processor state.                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_txp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " TXP State "
	    "----------------------------\n");

	/* Read the 12-byte firmware version string from scratchpad RAM. */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_TXP_SCRATCH + 0x10 + i * 4)));
	/* Guarantee NUL termination before printing the words as a string. */
	fw_version[3] = 0;
	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n",
	    val, BCE_TXP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
	    val, BCE_TXP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n",
	    val, BCE_TXP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
			/*
			 * Skip the big blank space at 0x45400-0x5ffff.
			 * (The original test "i < 0x454000 && i > 0x5ffff"
			 * had a mistyped bound and inverted logic, so the
			 * CPU register region was never printed.)
			 */
			if (i < 0x45400 || i > 0x5ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the RXP processor state.                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_rxp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " RXP State "
	    "----------------------------\n");

	/* Read the 12-byte firmware version string from scratchpad RAM. */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_RXP_SCRATCH + 0x10 + i * 4)));
	/* Guarantee NUL termination before printing the words as a string. */
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n",
	    val, BCE_RXP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
	    val, BCE_RXP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n",
	    val, BCE_RXP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
			/*
			 * Skip the big blank space at 0xc5400-0xdffff.
			 * (The original test "i < 0xc5400 && i > 0xdffff"
			 * was always false, so nothing was ever printed.)
			 */
			if (i < 0xc5400 || i > 0xdffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the TPAT processor state.                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_tpat_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " TPAT State "
	    "----------------------------\n");

	/*
	 * Read the 12-byte firmware version string from scratchpad RAM.
	 * NOTE(review): TPAT uses offset 0x410 while the other processors
	 * use 0x10 - confirm against the firmware layout.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_TPAT_SCRATCH + 0x410 + i * 4)));
	/* Guarantee NUL termination before printing the words as a string. */
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n",
	    val, BCE_TPAT_CPU_MODE);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
	    val, BCE_TPAT_CPU_STATE);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n",
	    val, BCE_TPAT_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
			/*
			 * Skip the big blank space at 0x85400-0x9ffff.
			 * (The original test "i < 0x854000 && i > 0x9ffff"
			 * had a mistyped bound and inverted logic, so the
			 * CPU register region was never printed.)
			 */
			if (i < 0x85400 || i > 0x9ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the Command Processor (CP) state.                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_cp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " CP State "
	    "----------------------------\n");

	/* Read the 12-byte firmware version string from scratchpad RAM. */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_CP_SCRATCH + 0x10 + i * 4)));
	/* Guarantee NUL termination before printing the words as a string. */
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n",
	    val, BCE_CP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
	    val, BCE_CP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
	    BCE_CP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
			/*
			 * Skip the big blank space at 0x185400-0x19ffff.
			 * (The original test "i < 0x185400 && i > 0x19ffff"
			 * was always false, so nothing was ever printed.)
			 */
			if (i < 0x185400 || i > 0x19ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the Completion Processor (COM) state.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_com_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " COM State "
	    "----------------------------\n");

	/* Read the 12-byte firmware version string from scratchpad RAM. */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_COM_SCRATCH + 0x10 + i * 4)));
	/*
	 * The fourth array element exists to guarantee NUL termination;
	 * the original code declared it but never initialized it, so the
	 * "%s" below could read past the version string.
	 */
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n",
	    val, BCE_COM_CPU_MODE);

	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
	    val, BCE_COM_CPU_STATE);

	val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
	    BCE_COM_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
			BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
			    "0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the Receive Virtual 2 Physical (RV2P) state.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_rv2p_state(struct bce_softc *sc)
{
	u32 val, pc1, pc2, fw_ver_high, fw_ver_low;

	BCE_PRINTF(
	    "----------------------------"
	    " RV2P State "
	    "----------------------------\n");

	/*
	 * Stall the RV2P processors so their instruction memory can be
	 * read back safely below.
	 */
	val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2;
	REG_WR_IND(sc, BCE_RV2P_CONFIG, val);

	/*
	 * Read the firmware version: select instruction address 1 of
	 * PROC1, then fetch the instruction word halves.
	 */
	val = 0x00000001;
	REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	/* Same sequence for PROC2. */
	val = 0x00000001;
	REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	/* Resume the RV2P processors. */
	val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2);
	REG_WR_IND(sc, BCE_RV2P_CONFIG, val);

	/*
	 * Fetch the program counter value.  0x68007800 appears to select
	 * the PC debug vectors for both processors - NOTE(review): magic
	 * value, confirm against the NetXtreme II debug documentation.
	 * PROC1's PC is in the low 16 bits of the peek register, PROC2's
	 * in the high 16 bits.
	 */
	val = 0x68007800;
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
	val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2);

	/* Fetch the program counter value again to see if it is advancing. */
	val = 0x68007800;
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
	val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the driver state and then enters the debugger.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_breakpoint(struct bce_softc *sc)
{

	/*
	 * Unreachable code to silence compiler warnings
	 * about unused functions.  Every debug dump helper is referenced
	 * here so the build stays clean regardless of which ones are
	 * actually exercised.
	 */
	if (0) {
		bce_freeze_controller(sc);
		bce_unfreeze_controller(sc);
		bce_dump_enet(sc, NULL);
		bce_dump_txbd(sc, 0, NULL);
		bce_dump_rxbd(sc, 0, NULL);
		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
		bce_dump_l2fhdr(sc, 0, NULL);
		bce_dump_ctx(sc, RX_CID);
		bce_dump_ftqs(sc);
		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD);
		bce_dump_status_block(sc);
		bce_dump_stats_block(sc);
		bce_dump_driver_state(sc);
		bce_dump_hw_state(sc);
		bce_dump_bc_state(sc);
		bce_dump_txp_state(sc, 0);
		bce_dump_rxp_state(sc, 0);
		bce_dump_tpat_state(sc, 0);
		bce_dump_cp_state(sc, 0);
		bce_dump_com_state(sc, 0);
		bce_dump_rv2p_state(sc);

#ifdef BCE_JUMBO_HDRSPLIT
		bce_dump_pgbd(sc, 0, NULL);
		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
#endif
	}

	/* Dump the state most useful at a breakpoint. */
	bce_dump_status_block(sc);
	bce_dump_driver_state(sc);

	/* Call the debugger. */
	breakpoint();

	return;
}
#endif
