1 /*- 2 * Copyright (c) 2006-2010 Broadcom Corporation 3 * David Christensen <davidch@broadcom.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written consent. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 /* 35 * The following controllers are supported by this driver: 36 * BCM5706C A2, A3 37 * BCM5706S A2, A3 38 * BCM5708C B1, B2 39 * BCM5708S B1, B2 40 * BCM5709C A1, C0 41 * BCM5709S A1, C0 42 * BCM5716C C0 43 * BCM5716S C0 44 * 45 * The following controllers are not supported by this driver: 46 * BCM5706C A0, A1 (pre-production) 47 * BCM5706S A0, A1 (pre-production) 48 * BCM5708C A0, B0 (pre-production) 49 * BCM5708S A0, B0 (pre-production) 50 * BCM5709C A0 B0, B1, B2 (pre-production) 51 * BCM5709S A0, B0, B1, B2 (pre-production) 52 */ 53 54 #include "opt_bce.h" 55 56 #include <dev/bce/if_bcereg.h> 57 #include <dev/bce/if_bcefw.h> 58 59 /****************************************************************************/ 60 /* BCE Debug Options */ 61 /****************************************************************************/ 62 #ifdef BCE_DEBUG 63 u32 bce_debug = BCE_WARN; 64 65 /* 0 = Never */ 66 /* 1 = 1 in 2,147,483,648 */ 67 /* 256 = 1 in 8,388,608 */ 68 /* 2048 = 1 in 1,048,576 */ 69 /* 65536 = 1 in 32,768 */ 70 /* 1048576 = 1 in 2,048 */ 71 /* 268435456 = 1 in 8 */ 72 /* 536870912 = 1 in 4 */ 73 /* 1073741824 = 1 in 2 */ 74 75 /* Controls how often the l2_fhdr frame error check will fail. */ 76 int l2fhdr_error_sim_control = 0; 77 78 /* Controls how often the unexpected attention check will fail. */ 79 int unexpected_attention_sim_control = 0; 80 81 /* Controls how often to simulate an mbuf allocation failure. */ 82 int mbuf_alloc_failed_sim_control = 0; 83 84 /* Controls how often to simulate a DMA mapping failure. */ 85 int dma_map_addr_failed_sim_control = 0; 86 87 /* Controls how often to simulate a bootcode failure. 
*/ 88 int bootcode_running_failure_sim_control = 0; 89 #endif 90 91 /****************************************************************************/ 92 /* BCE Build Time Options */ 93 /****************************************************************************/ 94 /* #define BCE_NVRAM_WRITE_SUPPORT 1 */ 95 96 97 /****************************************************************************/ 98 /* PCI Device ID Table */ 99 /* */ 100 /* Used by bce_probe() to identify the devices supported by this driver. */ 101 /****************************************************************************/ 102 #define BCE_DEVDESC_MAX 64 103 104 static struct bce_type bce_devs[] = { 105 /* BCM5706C Controllers and OEM boards. */ 106 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 107 "HP NC370T Multifunction Gigabit Server Adapter" }, 108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 109 "HP NC370i Multifunction Gigabit Server Adapter" }, 110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 111 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 112 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 113 "HP NC371i Multifunction Gigabit Server Adapter" }, 114 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 115 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 116 117 /* BCM5706S controllers and OEM boards. */ 118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 119 "HP NC370F Multifunction Gigabit Server Adapter" }, 120 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 121 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 122 123 /* BCM5708C controllers and OEM boards. 
*/ 124 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 125 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 126 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 127 "HP NC373i Multifunction Gigabit Server Adapter" }, 128 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 129 "HP NC374m PCIe Multifunction Adapter" }, 130 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 131 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 132 133 /* BCM5708S controllers and OEM boards. */ 134 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 135 "HP NC373m Multifunction Gigabit Server Adapter" }, 136 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 137 "HP NC373i Multifunction Gigabit Server Adapter" }, 138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 139 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 140 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 141 "Broadcom NetXtreme II BCM5708 1000Base-SX" }, 142 143 /* BCM5709C controllers and OEM boards. */ 144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 145 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 147 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 149 "Broadcom NetXtreme II BCM5709 1000Base-T" }, 150 151 /* BCM5709S controllers and OEM boards. */ 152 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 153 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 155 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 157 "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 158 159 /* BCM5716 controllers and OEM boards. 
*/ 160 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 161 "Broadcom NetXtreme II BCM5716 1000Base-T" }, 162 163 { 0, 0, 0, 0, NULL } 164 }; 165 166 167 /****************************************************************************/ 168 /* Supported Flash NVRAM device data. */ 169 /****************************************************************************/ 170 static struct flash_spec flash_table[] = 171 { 172 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 173 #define NONBUFFERED_FLAGS (BCE_NV_WREN) 174 175 /* Slow EEPROM */ 176 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 177 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 178 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 179 "EEPROM - slow"}, 180 /* Expansion entry 0001 */ 181 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 182 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 183 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 184 "Entry 0001"}, 185 /* Saifun SA25F010 (non-buffered flash) */ 186 /* strap, cfg1, & write1 need updates */ 187 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 189 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 190 "Non-buffered flash (128kB)"}, 191 /* Saifun SA25F020 (non-buffered flash) */ 192 /* strap, cfg1, & write1 need updates */ 193 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 194 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 195 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 196 "Non-buffered flash (256kB)"}, 197 /* Expansion entry 0100 */ 198 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 199 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 201 "Entry 0100"}, 202 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 203 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 204 
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 205 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 206 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 207 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 208 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 209 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 210 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 211 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 212 /* Saifun SA25F005 (non-buffered flash) */ 213 /* strap, cfg1, & write1 need updates */ 214 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 215 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 216 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 217 "Non-buffered flash (64kB)"}, 218 /* Fast EEPROM */ 219 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 220 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 221 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 222 "EEPROM - fast"}, 223 /* Expansion entry 1001 */ 224 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 225 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 226 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 227 "Entry 1001"}, 228 /* Expansion entry 1010 */ 229 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 230 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 231 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 232 "Entry 1010"}, 233 /* ATMEL AT45DB011B (buffered flash) */ 234 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 235 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 236 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 237 "Buffered flash (128kB)"}, 238 /* Expansion entry 1100 */ 239 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 240 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 241 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 242 "Entry 1100"}, 243 /* 
Expansion entry 1101 */ 244 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 245 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 246 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 247 "Entry 1101"}, 248 /* Ateml Expansion entry 1110 */ 249 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 250 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 251 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 252 "Entry 1110 (Atmel)"}, 253 /* ATMEL AT45DB021B (buffered flash) */ 254 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 255 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 256 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 257 "Buffered flash (256kB)"}, 258 }; 259 260 /* 261 * The BCM5709 controllers transparently handle the 262 * differences between Atmel 264 byte pages and all 263 * flash devices which use 256 byte pages, so no 264 * logical-to-physical mapping is required in the 265 * driver. 266 */ 267 static struct flash_spec flash_5709 = { 268 .flags = BCE_NV_BUFFERED, 269 .page_bits = BCM5709_FLASH_PAGE_BITS, 270 .page_size = BCM5709_FLASH_PAGE_SIZE, 271 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 272 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 273 .name = "5709/5716 buffered flash (256kB)", 274 }; 275 276 277 /****************************************************************************/ 278 /* FreeBSD device entry points. 
*/ 279 /****************************************************************************/ 280 static int bce_probe (device_t); 281 static int bce_attach (device_t); 282 static int bce_detach (device_t); 283 static int bce_shutdown (device_t); 284 285 286 /****************************************************************************/ 287 /* BCE Debug Data Structure Dump Routines */ 288 /****************************************************************************/ 289 #ifdef BCE_DEBUG 290 static u32 bce_reg_rd (struct bce_softc *, u32); 291 static void bce_reg_wr (struct bce_softc *, u32, u32); 292 static void bce_reg_wr16 (struct bce_softc *, u32, u16); 293 static u32 bce_ctx_rd (struct bce_softc *, u32, u32); 294 static void bce_dump_enet (struct bce_softc *, struct mbuf *); 295 static void bce_dump_mbuf (struct bce_softc *, struct mbuf *); 296 static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int); 297 static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int); 298 #ifdef BCE_JUMBO_HDRSPLIT 299 static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int); 300 #endif 301 static void bce_dump_txbd (struct bce_softc *, 302 int, struct tx_bd *); 303 static void bce_dump_rxbd (struct bce_softc *, 304 int, struct rx_bd *); 305 #ifdef BCE_JUMBO_HDRSPLIT 306 static void bce_dump_pgbd (struct bce_softc *, 307 int, struct rx_bd *); 308 #endif 309 static void bce_dump_l2fhdr (struct bce_softc *, 310 int, struct l2_fhdr *); 311 static void bce_dump_ctx (struct bce_softc *, u16); 312 static void bce_dump_ftqs (struct bce_softc *); 313 static void bce_dump_tx_chain (struct bce_softc *, u16, int); 314 static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int); 315 #ifdef BCE_JUMBO_HDRSPLIT 316 static void bce_dump_pg_chain (struct bce_softc *, u16, int); 317 #endif 318 static void bce_dump_status_block (struct bce_softc *); 319 static void bce_dump_stats_block (struct bce_softc *); 320 static void bce_dump_driver_state (struct bce_softc *); 321 static void 
bce_dump_hw_state (struct bce_softc *); 322 static void bce_dump_mq_regs (struct bce_softc *); 323 static void bce_dump_bc_state (struct bce_softc *); 324 static void bce_dump_txp_state (struct bce_softc *, int); 325 static void bce_dump_rxp_state (struct bce_softc *, int); 326 static void bce_dump_tpat_state (struct bce_softc *, int); 327 static void bce_dump_cp_state (struct bce_softc *, int); 328 static void bce_dump_com_state (struct bce_softc *, int); 329 static void bce_dump_rv2p_state (struct bce_softc *); 330 static void bce_breakpoint (struct bce_softc *); 331 #endif 332 333 334 /****************************************************************************/ 335 /* BCE Register/Memory Access Routines */ 336 /****************************************************************************/ 337 static u32 bce_reg_rd_ind (struct bce_softc *, u32); 338 static void bce_reg_wr_ind (struct bce_softc *, u32, u32); 339 static void bce_shmem_wr (struct bce_softc *, u32, u32); 340 static u32 bce_shmem_rd (struct bce_softc *, u32); 341 static void bce_ctx_wr (struct bce_softc *, u32, u32, u32); 342 static int bce_miibus_read_reg (device_t, int, int); 343 static int bce_miibus_write_reg (device_t, int, int, int); 344 static void bce_miibus_statchg (device_t); 345 346 347 /****************************************************************************/ 348 /* BCE NVRAM Access Routines */ 349 /****************************************************************************/ 350 static int bce_acquire_nvram_lock (struct bce_softc *); 351 static int bce_release_nvram_lock (struct bce_softc *); 352 static void bce_enable_nvram_access (struct bce_softc *); 353 static void bce_disable_nvram_access (struct bce_softc *); 354 static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32); 355 static int bce_init_nvram (struct bce_softc *); 356 static int bce_nvram_read (struct bce_softc *, u32, u8 *, int); 357 static int bce_nvram_test (struct bce_softc *); 358 #ifdef 
BCE_NVRAM_WRITE_SUPPORT 359 static int bce_enable_nvram_write (struct bce_softc *); 360 static void bce_disable_nvram_write (struct bce_softc *); 361 static int bce_nvram_erase_page (struct bce_softc *, u32); 362 static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32); 363 static int bce_nvram_write (struct bce_softc *, u32, u8 *, int); 364 #endif 365 366 /****************************************************************************/ 367 /* */ 368 /****************************************************************************/ 369 static void bce_get_media (struct bce_softc *); 370 static void bce_init_media (struct bce_softc *); 371 static void bce_dma_map_addr (void *, 372 bus_dma_segment_t *, int, int); 373 static int bce_dma_alloc (device_t); 374 static void bce_dma_free (struct bce_softc *); 375 static void bce_release_resources (struct bce_softc *); 376 377 /****************************************************************************/ 378 /* BCE Firmware Synchronization and Load */ 379 /****************************************************************************/ 380 static int bce_fw_sync (struct bce_softc *, u32); 381 static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32); 382 static void bce_load_cpu_fw (struct bce_softc *, 383 struct cpu_reg *, struct fw_info *); 384 static void bce_start_cpu (struct bce_softc *, struct cpu_reg *); 385 static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *); 386 static void bce_start_rxp_cpu (struct bce_softc *); 387 static void bce_init_rxp_cpu (struct bce_softc *); 388 static void bce_init_txp_cpu (struct bce_softc *); 389 static void bce_init_tpat_cpu (struct bce_softc *); 390 static void bce_init_cp_cpu (struct bce_softc *); 391 static void bce_init_com_cpu (struct bce_softc *); 392 static void bce_init_cpus (struct bce_softc *); 393 394 static void bce_print_adapter_info (struct bce_softc *); 395 static void bce_probe_pci_caps (device_t, struct bce_softc *); 396 static void bce_stop 
(struct bce_softc *); 397 static int bce_reset (struct bce_softc *, u32); 398 static int bce_chipinit (struct bce_softc *); 399 static int bce_blockinit (struct bce_softc *); 400 401 static int bce_init_tx_chain (struct bce_softc *); 402 static void bce_free_tx_chain (struct bce_softc *); 403 404 static int bce_get_rx_buf (struct bce_softc *, 405 struct mbuf *, u16 *, u16 *, u32 *); 406 static int bce_init_rx_chain (struct bce_softc *); 407 static void bce_fill_rx_chain (struct bce_softc *); 408 static void bce_free_rx_chain (struct bce_softc *); 409 410 #ifdef BCE_JUMBO_HDRSPLIT 411 static int bce_get_pg_buf (struct bce_softc *, 412 struct mbuf *, u16 *, u16 *); 413 static int bce_init_pg_chain (struct bce_softc *); 414 static void bce_fill_pg_chain (struct bce_softc *); 415 static void bce_free_pg_chain (struct bce_softc *); 416 #endif 417 418 static struct mbuf *bce_tso_setup (struct bce_softc *, 419 struct mbuf **, u16 *); 420 static int bce_tx_encap (struct bce_softc *, struct mbuf **); 421 static void bce_start_locked (struct ifnet *); 422 static void bce_start (struct ifnet *); 423 static int bce_ioctl (struct ifnet *, u_long, caddr_t); 424 static void bce_watchdog (struct bce_softc *); 425 static int bce_ifmedia_upd (struct ifnet *); 426 static int bce_ifmedia_upd_locked (struct ifnet *); 427 static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *); 428 static void bce_init_locked (struct bce_softc *); 429 static void bce_init (void *); 430 static void bce_mgmt_init_locked (struct bce_softc *sc); 431 432 static int bce_init_ctx (struct bce_softc *); 433 static void bce_get_mac_addr (struct bce_softc *); 434 static void bce_set_mac_addr (struct bce_softc *); 435 static void bce_phy_intr (struct bce_softc *); 436 static inline u16 bce_get_hw_rx_cons (struct bce_softc *); 437 static void bce_rx_intr (struct bce_softc *); 438 static void bce_tx_intr (struct bce_softc *); 439 static void bce_disable_intr (struct bce_softc *); 440 static void 
bce_enable_intr (struct bce_softc *, int); 441 442 static void bce_intr (void *); 443 static void bce_set_rx_mode (struct bce_softc *); 444 static void bce_stats_update (struct bce_softc *); 445 static void bce_tick (void *); 446 static void bce_pulse (void *); 447 static void bce_add_sysctls (struct bce_softc *); 448 449 450 /****************************************************************************/ 451 /* FreeBSD device dispatch table. */ 452 /****************************************************************************/ 453 static device_method_t bce_methods[] = { 454 /* Device interface (device_if.h) */ 455 DEVMETHOD(device_probe, bce_probe), 456 DEVMETHOD(device_attach, bce_attach), 457 DEVMETHOD(device_detach, bce_detach), 458 DEVMETHOD(device_shutdown, bce_shutdown), 459 /* Supported by device interface but not used here. */ 460 /* DEVMETHOD(device_identify, bce_identify), */ 461 /* DEVMETHOD(device_suspend, bce_suspend), */ 462 /* DEVMETHOD(device_resume, bce_resume), */ 463 /* DEVMETHOD(device_quiesce, bce_quiesce), */ 464 465 /* Bus interface (bus_if.h) */ 466 DEVMETHOD(bus_print_child, bus_generic_print_child), 467 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 468 469 /* MII interface (miibus_if.h) */ 470 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 471 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 472 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 473 /* Supported by MII interface but not used here. 
*/ 474 /* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */ 475 /* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */ 476 477 { 0, 0 } 478 }; 479 480 static driver_t bce_driver = { 481 "bce", 482 bce_methods, 483 sizeof(struct bce_softc) 484 }; 485 486 static devclass_t bce_devclass; 487 488 MODULE_DEPEND(bce, pci, 1, 1, 1); 489 MODULE_DEPEND(bce, ether, 1, 1, 1); 490 MODULE_DEPEND(bce, miibus, 1, 1, 1); 491 492 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0); 493 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0); 494 495 496 /****************************************************************************/ 497 /* Tunable device values */ 498 /****************************************************************************/ 499 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters"); 500 501 /* Allowable values are TRUE or FALSE */ 502 static int bce_tso_enable = TRUE; 503 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable); 504 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, 505 "TSO Enable/Disable"); 506 507 /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ 508 /* ToDo: Add MSI-X support. */ 509 static int bce_msi_enable = 1; 510 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable); 511 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, 512 "MSI-X|MSI|INTx selector"); 513 514 /* ToDo: Add tunable to enable/disable strict MTU handling. */ 515 /* Currently allows "loose" RX MTU checking (i.e. sets the */ 516 /* H/W RX MTU to the size of the largest receive buffer, or */ 517 /* 2048 bytes). This will cause a UNH failure but is more */ 518 /* desireable from a functional perspective. */ 519 520 521 /****************************************************************************/ 522 /* Device probe function. 
*/ 523 /* */ 524 /* Compares the device to the driver's list of supported devices and */ 525 /* reports back to the OS whether this is the right driver for the device. */ 526 /* */ 527 /* Returns: */ 528 /* BUS_PROBE_DEFAULT on success, positive value on failure. */ 529 /****************************************************************************/ 530 static int 531 bce_probe(device_t dev) 532 { 533 struct bce_type *t; 534 struct bce_softc *sc; 535 char *descbuf; 536 u16 vid = 0, did = 0, svid = 0, sdid = 0; 537 538 t = bce_devs; 539 540 sc = device_get_softc(dev); 541 bzero(sc, sizeof(struct bce_softc)); 542 sc->bce_unit = device_get_unit(dev); 543 sc->bce_dev = dev; 544 545 /* Get the data for the device to be probed. */ 546 vid = pci_get_vendor(dev); 547 did = pci_get_device(dev); 548 svid = pci_get_subvendor(dev); 549 sdid = pci_get_subdevice(dev); 550 551 DBPRINT(sc, BCE_EXTREME_LOAD, 552 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 553 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 554 555 /* Look through the list of known devices for a match. */ 556 while(t->bce_name != NULL) { 557 558 if ((vid == t->bce_vid) && (did == t->bce_did) && 559 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && 560 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { 561 562 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 563 564 if (descbuf == NULL) 565 return(ENOMEM); 566 567 /* Print out the device identity. */ 568 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 569 t->bce_name, (((pci_read_config(dev, 570 PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), 571 (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); 572 573 device_set_desc_copy(dev, descbuf); 574 free(descbuf, M_TEMP); 575 return(BUS_PROBE_DEFAULT); 576 } 577 t++; 578 } 579 580 return(ENXIO); 581 } 582 583 584 /****************************************************************************/ 585 /* PCI Capabilities Probe Function. 
 */
/*                                                                          */
/* NOTE(review): the header title above appears to be copy-pasted from      */
/* bce_probe_pci_caps(); this routine actually prints the adapter's ASIC    */
/* ID, revision, bus type/speed, bootcode version, and feature flags to     */
/* the console (only when booting verbosely).                               */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	/* Counts feature flags already printed, to place '|' separators. */
	int i = 0;

	DBENTER(BCE_VERBOSE_LOAD);

	if (bootverbose) {
		BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
		/* Revision letter/number decoded from the chip ID field. */
		printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
		    12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));


		/* Bus info: PCIe lane width and link speed, or PCI/PCI-X. */
		if (sc->bce_flags & BCE_PCIE_FLAG) {
			printf("Bus (PCIe x%d, ", sc->link_width);
			switch (sc->link_speed) {
			case 1: printf("2.5Gbps); "); break;
			case 2: printf("5Gbps); "); break;
			default: printf("Unknown link speed); ");
			}
		} else {
			printf("Bus (PCI%s, %s, %dMHz); ",
			    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
			    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
			    "32-bit" : "64-bit"), sc->bus_speed_mhz);
		}

		/* Firmware version and device features. */
		printf("B/C (%s); Flags (", sc->bce_bc_ver);

#ifdef BCE_JUMBO_HDRSPLIT
		/* Header-split support is a compile-time option. */
		printf("SPLT");
		i++;
#endif

		if (sc->bce_flags & BCE_USING_MSI_FLAG) {
			if (i > 0) printf("|");
			printf("MSI"); i++;
		}

		if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
			if (i > 0) printf("|");
			printf("MSI-X"); i++;
		}

		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
			if (i > 0) printf("|");
			printf("2.5G"); i++;
		}

		/* Management firmware, with its version when enabled. */
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
			if (i > 0) printf("|");
			printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
		} else {
			printf(")\n");
		}
	}

	DBEXIT(BCE_VERBOSE_LOAD);
}


/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features are */
/* supported.
 */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
{
	u32 reg;

	DBENTER(BCE_VERBOSE_LOAD);

	/*
	 * Check if PCI-X capability is enabled.  pci_find_extcap() returns
	 * 0 on success and stores the capability's config-space offset in
	 * 'reg'; a nonzero offset means the capability is present.
	 */
	if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
		if (reg != 0)
			sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		if (reg != 0) {
			/*
			 * reg + 0x12 is the Link Status register within the
			 * PCIe capability structure; low nibble is the link
			 * speed code, bits 4-9 the negotiated lane width.
			 * NOTE(review): presumably equivalent to the
			 * PCIER_LINK_STA offset constant -- confirm.
			 */
			u16 link_status = pci_read_config(dev, reg + 0x12, 2);
			DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = "
			    "0x%08X\n", link_status);
			sc->link_speed = link_status & 0xf;
			sc->link_width = (link_status >> 4) & 0x3f;
			sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
			sc->bce_flags |= BCE_PCIE_FLAG;
		}
	}

	/* Check if MSI capability is enabled. */
	if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
		if (reg != 0)
			sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
	}

	/* Check if MSI-X capability is enabled. */
	if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
		if (reg != 0)
			sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
	}

	DBEXIT(BCE_VERBOSE_LOAD);
}


/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,      */
/* resets and initializes the hardware, and initializes driver instance     */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/
/****************************************************************************/
static int
bce_attach(device_t dev)
{
    struct bce_softc *sc;
    struct ifnet *ifp;
    u32 val;
    int error, rid, rc = 0;

    sc = device_get_softc(dev);
    sc->bce_dev = dev;

    DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

    sc->bce_unit = device_get_unit(dev);

    /* Set initial device and PHY flags */
    sc->bce_flags = 0;
    sc->bce_phy_flags = 0;

    pci_enable_busmaster(dev);

    /* Allocate PCI memory resources (BAR 0 holds the register window). */
    rid = PCIR_BAR(0);
    sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (sc->bce_res_mem == NULL) {
        BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Get various resource handles. */
    sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
    sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
    sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);

    bce_probe_pci_caps(dev, sc);

    rid = 1;
#if 0
    /* Try allocating MSI-X interrupts. */
    if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
        (bce_msi_enable >= 2) &&
        ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE)) != NULL)) {

        msi_needed = sc->bce_msi_count = 1;

        if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
            (sc->bce_msi_count != msi_needed)) {
            BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
                "Received = %d, error = %d\n", __FILE__, __LINE__,
                msi_needed, sc->bce_msi_count, error);
            sc->bce_msi_count = 0;
            pci_release_msi(dev);
            bus_release_resource(dev, SYS_RES_MEMORY, rid,
                sc->bce_res_irq);
            sc->bce_res_irq = NULL;
        } else {
            DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
                __FUNCTION__);
            sc->bce_flags |= BCE_USING_MSIX_FLAG;
            sc->bce_intr = bce_intr;
        }
    }
#endif

    /* Try allocating a MSI interrupt. */
    if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
        (bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
        sc->bce_msi_count = 1;
        if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
            BCE_PRINTF("%s(%d): MSI allocation failed! "
                "error = %d\n", __FILE__, __LINE__, error);
            sc->bce_msi_count = 0;
            pci_release_msi(dev);
        } else {
            DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
                "interrupt.\n", __FUNCTION__);
            sc->bce_flags |= BCE_USING_MSI_FLAG;
            /* 5709/5716 support one-shot MSI mode. */
            if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
                (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
                sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
            sc->bce_irq_rid = 1;
            sc->bce_intr = bce_intr;
        }
    }

    /* Try allocating a legacy interrupt. */
    if (sc->bce_msi_count == 0) {
        DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
            __FUNCTION__);
        rid = 0;
        sc->bce_intr = bce_intr;
    }

    sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
        &rid, RF_SHAREABLE | RF_ACTIVE);

    sc->bce_irq_rid = rid;

    /* Report any IRQ allocation errors. */
    if (sc->bce_res_irq == NULL) {
        BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Initialize mutex for the current device instance. */
    BCE_LOCK_INIT(sc, device_get_nameunit(dev));

    /*
     * Configure byte swap and enable indirect register access.
     * Rely on CPU to do target byte swapping on big endian systems.
     * Access to registers outside of PCI configuration space are not
     * valid until this is done.
     */
    pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
        BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
        BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

    /* Save ASIC revision info. */
    sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

    /* Weed out any non-production controller revisions. */
    switch(BCE_CHIP_ID(sc)) {
    case BCE_CHIP_ID_5706_A0:
    case BCE_CHIP_ID_5706_A1:
    case BCE_CHIP_ID_5708_A0:
    case BCE_CHIP_ID_5708_B0:
    case BCE_CHIP_ID_5709_A0:
    case BCE_CHIP_ID_5709_B0:
    case BCE_CHIP_ID_5709_B1:
    case BCE_CHIP_ID_5709_B2:
        BCE_PRINTF("%s(%d): Unsupported controller "
            "revision (%c%d)!\n", __FILE__, __LINE__,
            (((pci_read_config(dev, PCIR_REVID, 4) &
            0xf0) >> 4) + 'A'), (pci_read_config(dev,
            PCIR_REVID, 4) & 0xf));
        rc = ENODEV;
        goto bce_attach_fail;
    }

    /*
     * The embedded PCIe to PCI-X bridge (EPB)
     * in the 5708 cannot address memory above
     * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
     */
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
        sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
    else
        sc->max_bus_addr = BUS_SPACE_MAXADDR;

    /*
     * Find the base address for shared memory access.
     * Newer versions of bootcode use a signature and offset
     * while older versions use a fixed address.
     */
    val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
    if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
        /* Multi-port devices use different offsets in shared memory. */
        sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
            (pci_get_function(sc->bce_dev) << 2));
    else
        sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;

    DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
        __FUNCTION__, sc->bce_shmem_base);

    /*
     * Fetch the bootcode revision and render it as a dotted
     * "major.minor.patch" string (one byte per component, leading
     * zeros suppressed) into sc->bce_bc_ver.
     */
    val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
    for (int i = 0, j = 0; i < 3; i++) {
        u8 num;

        num = (u8) (val >> (24 - (i * 8)));
        for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
            if (num >= k || !skip0 || k == 1) {
                sc->bce_bc_ver[j++] = (num / k) + '0';
                skip0 = 0;
            }
        }

        if (i != 2)
            sc->bce_bc_ver[j++] = '.';
    }

    /* Check if any management firmware is enabled. */
    val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
    if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
        sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

        /* Allow time for firmware to enter the running state. */
        for (int i = 0; i < 30; i++) {
            val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
            if (val & BCE_CONDITION_MFW_RUN_MASK)
                break;
            DELAY(10000);
        }

        /* Check if management firmware is running. */
        val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
        val &= BCE_CONDITION_MFW_RUN_MASK;
        if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
            (val != BCE_CONDITION_MFW_RUN_NONE)) {
            u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
            int i = 0;

            /* Read the management firmware version string. */
            for (int j = 0; j < 3; j++) {
                val = bce_reg_rd_ind(sc, addr + j * 4);
                val = bswap32(val);
                memcpy(&sc->bce_mfw_ver[i], &val, 4);
                i += 4;
            }
        } else {
            /* May cause firmware synchronization timeouts. */
            BCE_PRINTF("%s(%d): Management firmware enabled "
                "but not running!\n", __FILE__, __LINE__);
            strcpy(sc->bce_mfw_ver, "NOT RUNNING!");

            /* ToDo: Any action the driver should take? */
        }
    }

    /* Get PCI bus information (speed and type). */
    val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
    if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
        u32 clkreg;

        sc->bce_flags |= BCE_PCIX_FLAG;

        clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);

        clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
        switch (clkreg) {
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
            sc->bus_speed_mhz = 133;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
            sc->bus_speed_mhz = 100;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
            sc->bus_speed_mhz = 66;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
            sc->bus_speed_mhz = 50;
            break;

        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
        case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
            sc->bus_speed_mhz = 33;
            break;
        }
    } else {
        if (val & BCE_PCICFG_MISC_STATUS_M66EN)
            sc->bus_speed_mhz = 66;
        else
            sc->bus_speed_mhz = 33;
    }

    if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
        sc->bce_flags |= BCE_PCI_32BIT_FLAG;

    /* Reset controller and announce to bootcode that driver is present. */
    if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
        BCE_PRINTF("%s(%d): Controller reset failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Initialize the controller. */
    if (bce_chipinit(sc)) {
        BCE_PRINTF("%s(%d): Controller initialization failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Perform NVRAM test. */
    if (bce_nvram_test(sc)) {
        BCE_PRINTF("%s(%d): NVRAM test failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Fetch the permanent Ethernet MAC address. */
    bce_get_mac_addr(sc);

    /*
     * Trip points control how many BDs
     * should be ready before generating an
     * interrupt while ticks control how long
     * a BD can sit in the chain before
     * generating an interrupt.  Set the default
     * values for the RX and TX chains.
     */

#ifdef BCE_DEBUG
    /* Force more frequent interrupts. */
    sc->bce_tx_quick_cons_trip_int = 1;
    sc->bce_tx_quick_cons_trip = 1;
    sc->bce_tx_ticks_int = 0;
    sc->bce_tx_ticks = 0;

    sc->bce_rx_quick_cons_trip_int = 1;
    sc->bce_rx_quick_cons_trip = 1;
    sc->bce_rx_ticks_int = 0;
    sc->bce_rx_ticks = 0;
#else
    /* Improve throughput at the expense of increased latency. */
    sc->bce_tx_quick_cons_trip_int = 20;
    sc->bce_tx_quick_cons_trip = 20;
    sc->bce_tx_ticks_int = 80;
    sc->bce_tx_ticks = 80;

    sc->bce_rx_quick_cons_trip_int = 6;
    sc->bce_rx_quick_cons_trip = 6;
    sc->bce_rx_ticks_int = 18;
    sc->bce_rx_ticks = 18;
#endif

    /* Not used for L2. */
    sc->bce_comp_prod_trip_int = 0;
    sc->bce_comp_prod_trip = 0;
    sc->bce_com_ticks_int = 0;
    sc->bce_com_ticks = 0;
    sc->bce_cmd_ticks_int = 0;
    sc->bce_cmd_ticks = 0;

    /* Update statistics once every second. */
    sc->bce_stats_ticks = 1000000 & 0xffff00;

    /* Find the media type for the adapter. */
    bce_get_media(sc);

    /* Store data needed by PHY driver for backplane applications */
    sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
    sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);

    /* Allocate DMA memory resources. */
    if (bce_dma_alloc(dev)) {
        BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Allocate an ifnet structure. */
    ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        BCE_PRINTF("%s(%d): Interface allocation failed!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Initialize the ifnet interface. */
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = bce_ioctl;
    ifp->if_start = bce_start;
    ifp->if_init = bce_init;
    ifp->if_mtu = ETHERMTU;

    if (bce_tso_enable) {
        ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
        ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 |
            IFCAP_VLAN_HWTSO;
    } else {
        ifp->if_hwassist = BCE_IF_HWASSIST;
        ifp->if_capabilities = BCE_IF_CAPABILITIES;
    }

    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Assume standard mbuf sizes for buffer allocation.
     * This may change later if the MTU size is set to
     * something other than 1500.
     */
#ifdef BCE_JUMBO_HDRSPLIT
    sc->rx_bd_mbuf_alloc_size = MHLEN;
    /* Make sure offset is 16 byte aligned for hardware. */
    sc->rx_bd_mbuf_align_pad =
        roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
    sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
        sc->rx_bd_mbuf_align_pad;
    sc->pg_bd_mbuf_alloc_size = MCLBYTES;
#else
    sc->rx_bd_mbuf_alloc_size = MCLBYTES;
    sc->rx_bd_mbuf_align_pad =
        roundup2(MCLBYTES, 16) - MCLBYTES;
    sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
        sc->rx_bd_mbuf_align_pad;
#endif

    ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
    IFQ_SET_READY(&ifp->if_snd);

    if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
        ifp->if_baudrate = IF_Mbps(2500ULL);
    else
        ifp->if_baudrate = IF_Mbps(1000);

    /* Handle any special PHY initialization for SerDes PHYs. */
    bce_init_media(sc);

    /* MII child bus by probing the PHY. */
    if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
        bce_ifmedia_sts)) {
        BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
            __FILE__, __LINE__);
        rc = ENXIO;
        goto bce_attach_fail;
    }

    /* Attach to the Ethernet interface list. */
    ether_ifattach(ifp, sc->eaddr);

#if __FreeBSD_version < 500000
    callout_init(&sc->bce_tick_callout);
    callout_init(&sc->bce_pulse_callout);
#else
    callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
    callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
#endif

    /* Hookup IRQ last. */
    rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, bce_intr, sc, &sc->bce_intrhand);

    if (rc) {
        BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
            __FILE__, __LINE__);
        /* ether_ifattach() already ran, so a full detach is required. */
        bce_detach(dev);
        goto bce_attach_exit;
    }

    /*
     * At this point we've acquired all the resources
     * we need to run so there's no turning back, we're
     * cleared for launch.
     */

    /* Print some important debugging info. */
    DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));

    /* Add the supported sysctls to the kernel. */
    bce_add_sysctls(sc);

    BCE_LOCK(sc);

    /*
     * The chip reset earlier notified the bootcode that
     * a driver is present.  We now need to start our pulse
     * routine so that the bootcode is reminded that we're
     * still running.
     */
    bce_pulse(sc);

    bce_mgmt_init_locked(sc);
    BCE_UNLOCK(sc);

    /* Finally, print some useful adapter info */
    bce_print_adapter_info(sc);
    DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
        __FUNCTION__, sc);

    goto bce_attach_exit;

bce_attach_fail:
    bce_release_resources(sc);

bce_attach_exit:

    DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

    return(rc);
}


/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
    struct bce_softc *sc = device_get_softc(dev);
    struct ifnet *ifp;
    u32 msg;

    DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);

    ifp = sc->bce_ifp;

    /* Stop and reset the controller. */
    BCE_LOCK(sc);

    /* Stop the pulse so the bootcode can go to driver absent state. */
    callout_stop(&sc->bce_pulse_callout);

    bce_stop(sc);
    /* Tell the bootcode whether WOL link state should be preserved. */
    if (sc->bce_flags & BCE_NO_WOL_FLAG)
        msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
    else
        msg = BCE_DRV_MSG_CODE_UNLOAD;
    bce_reset(sc, msg);

    BCE_UNLOCK(sc);

    ether_ifdetach(ifp);

    /* If we have a child device on the MII bus remove it too. */
    bus_generic_detach(dev);
    device_delete_child(dev, sc->bce_miibus);

    /* Release all remaining resources. */
    bce_release_resources(sc);

    DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);

    return(0);
}


/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_shutdown(device_t dev)
{
    struct bce_softc *sc = device_get_softc(dev);
    u32 msg;

    DBENTER(BCE_VERBOSE);

    BCE_LOCK(sc);
    bce_stop(sc);
    if (sc->bce_flags & BCE_NO_WOL_FLAG)
        msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
    else
        msg = BCE_DRV_MSG_CODE_UNLOAD;
    bce_reset(sc, msg);
    BCE_UNLOCK(sc);

    DBEXIT(BCE_VERBOSE);

    return (0);
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Register read.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static u32
bce_reg_rd(struct bce_softc *sc, u32 offset)
{
    u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
    DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
        __FUNCTION__, offset, val);
    return val;
}


/****************************************************************************/
/* Register write (16 bit).                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 1313 /****************************************************************************/ 1314 static void 1315 bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val) 1316 { 1317 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n", 1318 __FUNCTION__, offset, val); 1319 bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val); 1320 } 1321 1322 1323 /****************************************************************************/ 1324 /* Register write. */ 1325 /* */ 1326 /* Returns: */ 1327 /* Nothing. */ 1328 /****************************************************************************/ 1329 static void 1330 bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val) 1331 { 1332 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1333 __FUNCTION__, offset, val); 1334 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val); 1335 } 1336 #endif 1337 1338 /****************************************************************************/ 1339 /* Indirect register read. */ 1340 /* */ 1341 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 1342 /* configuration space. Using this mechanism avoids issues with posted */ 1343 /* reads but is much slower than memory-mapped I/O. */ 1344 /* */ 1345 /* Returns: */ 1346 /* The value of the register. 
*/ 1347 /****************************************************************************/ 1348 static u32 1349 bce_reg_rd_ind(struct bce_softc *sc, u32 offset) 1350 { 1351 device_t dev; 1352 dev = sc->bce_dev; 1353 1354 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1355 #ifdef BCE_DEBUG 1356 { 1357 u32 val; 1358 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1359 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1360 __FUNCTION__, offset, val); 1361 return val; 1362 } 1363 #else 1364 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1365 #endif 1366 } 1367 1368 1369 /****************************************************************************/ 1370 /* Indirect register write. */ 1371 /* */ 1372 /* Writes NetXtreme II registers using an index/data register pair in PCI */ 1373 /* configuration space. Using this mechanism avoids issues with posted */ 1374 /* writes but is muchh slower than memory-mapped I/O. */ 1375 /* */ 1376 /* Returns: */ 1377 /* Nothing. */ 1378 /****************************************************************************/ 1379 static void 1380 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val) 1381 { 1382 device_t dev; 1383 dev = sc->bce_dev; 1384 1385 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", 1386 __FUNCTION__, offset, val); 1387 1388 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1389 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1390 } 1391 1392 1393 /****************************************************************************/ 1394 /* Shared memory write. */ 1395 /* */ 1396 /* Writes NetXtreme II shared memory region. */ 1397 /* */ 1398 /* Returns: */ 1399 /* Nothing. 
 */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
{
    DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to "
        "0x%08X\n", __FUNCTION__, val, offset);

    /* Shared memory lives behind the indirect register window. */
    bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
}


/****************************************************************************/
/* Shared memory read.                                                      */
/*                                                                          */
/* Reads NetXtreme II shared memory region.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   The 32 bit value read.                                                 */
/****************************************************************************/
static u32
bce_shmem_rd(struct bce_softc *sc, u32 offset)
{
    u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);

    DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from "
        "0x%08X\n", __FUNCTION__, val, offset);

    return val;
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Context memory read.                                                     */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The requested 32 bit value of context memory.                          */
/****************************************************************************/
static u32
bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
{
    u32 idx, offset, retry_cnt = 5, val;

    /* Sanity-check the CID address and offset alignment (debug builds). */
    DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 ||
        cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID "
        "address: 0x%08X.\n", __FUNCTION__, cid_addr));

    offset = ctx_offset + cid_addr;

    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {

        /* 5709/5716: issue a read request and poll for completion. */
        REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));

        /* Hardware clears READ_REQ when the data is available. */
        for (idx = 0; idx < retry_cnt; idx++) {
            val = REG_RD(sc, BCE_CTX_CTX_CTRL);
            if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
                break;
            DELAY(5);
        }

        if (val & BCE_CTX_CTX_CTRL_READ_REQ)
            BCE_PRINTF("%s(%d); Unable to read CTX memory: "
                "cid_addr = 0x%08X, offset = 0x%08X!\n",
                __FILE__, __LINE__, cid_addr, ctx_offset);

        val = REG_RD(sc, BCE_CTX_CTX_DATA);
    } else {
        /* Older chips: simple address/data register pair. */
        REG_WR(sc, BCE_CTX_DATA_ADR, offset);
        val = REG_RD(sc, BCE_CTX_DATA);
    }

    DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
        "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);

    return(val);
}
#endif


/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
{
    u32 idx, offset = ctx_offset + cid_addr;
    u32 val, retry_cnt = 5;

    DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
        "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);

    /* Sanity-check the CID address and offset alignment (debug builds). */
    DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
        BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
        __FUNCTION__, cid_addr));

    if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
        (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {

        /* 5709/5716: stage the data, then issue the write request. */
        REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
        REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

        /* Hardware clears WRITE_REQ when the write has completed. */
        for (idx = 0; idx < retry_cnt; idx++) {
            val = REG_RD(sc, BCE_CTX_CTX_CTRL);
            if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
                break;
            DELAY(5);
        }

        if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
            BCE_PRINTF("%s(%d); Unable to write CTX memory: "
                "cid_addr = 0x%08X, offset = 0x%08X!\n",
                __FILE__, __LINE__, cid_addr, ctx_offset);

    } else {
        /* Older chips: simple address/data register pair. */
        REG_WR(sc, BCE_CTX_DATA_ADR, offset);
        REG_WR(sc, BCE_CTX_DATA, ctx_val);
    }
}


/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
    struct bce_softc *sc;
    u32 val;
    int i;

    sc = device_get_softc(dev);

    /* Make sure we are accessing the correct PHY address. */
    if (phy != sc->bce_phy_addr) {
        DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d "
            "for PHY read!\n", phy);
        return(0);
    }

    /*
     * The 5709S PHY is an IEEE Clause 45 PHY
     * with special mappings to work with IEEE
     * Clause 22 register accesses.
     */
    if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
        if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
            reg += 0x10;
    }

    /* Auto-polling must be paused while we own the MDIO interface. */
    if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
        val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
        REG_RD(sc, BCE_EMAC_MDIO_MODE);

        DELAY(40);
    }

    /* Start the MDIO read transaction. */
    val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
        BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
        BCE_EMAC_MDIO_COMM_START_BUSY;
    REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

    /* Poll for completion (START_BUSY clears when done). */
    for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
        DELAY(10);

        val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
        if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
            DELAY(5);

            val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
            val &= BCE_EMAC_MDIO_COMM_DATA;

            break;
        }
    }

    if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
        BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, "
            "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
        val = 0x0;
    } else {
        val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
    }

    /* Re-enable auto-polling if it was active before. */
    if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
        val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
        val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
        REG_RD(sc, BCE_EMAC_MDIO_MODE);

        DELAY(40);
    }

    DB_PRINT_PHY_REG(reg, val);
    return (val & 0xffff);

}


/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.
*/ 1623 /* */ 1624 /* Returns: */ 1625 /* The value of the register. */ 1626 /****************************************************************************/ 1627 static int 1628 bce_miibus_write_reg(device_t dev, int phy, int reg, int val) 1629 { 1630 struct bce_softc *sc; 1631 u32 val1; 1632 int i; 1633 1634 sc = device_get_softc(dev); 1635 1636 /* Make sure we are accessing the correct PHY address. */ 1637 if (phy != sc->bce_phy_addr) { 1638 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d " 1639 "for PHY write!\n", phy); 1640 return(0); 1641 } 1642 1643 DB_PRINT_PHY_REG(reg, val); 1644 1645 /* 1646 * The 5709S PHY is an IEEE Clause 45 PHY 1647 * with special mappings to work with IEEE 1648 * Clause 22 register accesses. 1649 */ 1650 if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { 1651 if (reg >= MII_BMCR && reg <= MII_ANLPRNP) 1652 reg += 0x10; 1653 } 1654 1655 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1656 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1657 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1658 1659 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1660 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1661 1662 DELAY(40); 1663 } 1664 1665 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | 1666 BCE_EMAC_MDIO_COMM_COMMAND_WRITE | 1667 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; 1668 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); 1669 1670 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1671 DELAY(10); 1672 1673 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1674 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1675 DELAY(5); 1676 break; 1677 } 1678 } 1679 1680 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) 1681 BCE_PRINTF("%s(%d): PHY write timeout!\n", 1682 __FILE__, __LINE__); 1683 1684 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1685 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1686 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1687 1688 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1689 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1690 1691 DELAY(40); 1692 } 1693 1694 return 0; 1695 } 1696 

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
    struct bce_softc *sc;
    struct mii_data *mii;
    int val;

    sc = device_get_softc(dev);

    DBENTER(BCE_VERBOSE_PHY);

    mii = device_get_softc(sc->bce_miibus);

    /* Clear the port-type, duplex, and loopback bits before re-deriving. */
    val = REG_RD(sc, BCE_EMAC_MODE);
    val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
        BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
        BCE_EMAC_MODE_25G);

    /* Set MII or GMII interface based on the PHY speed. */
    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_10_T:
        if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
            DBPRINT(sc, BCE_INFO_PHY,
                "Enabling 10Mb interface.\n");
            val |= BCE_EMAC_MODE_PORT_MII_10;
            break;
        }
        /* fall-through */
    case IFM_100_TX:
        DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
        val |= BCE_EMAC_MODE_PORT_MII;
        break;
    case IFM_2500_SX:
        DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
        val |= BCE_EMAC_MODE_25G;
        /* fall-through */
    case IFM_1000_T:
    case IFM_1000_SX:
        DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
        val |= BCE_EMAC_MODE_PORT_GMII;
        break;
    default:
        DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
            "default GMII interface.\n");
        val |= BCE_EMAC_MODE_PORT_GMII;
    }

    /* Set half or full duplex based on PHY settings. */
    if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
        DBPRINT(sc, BCE_INFO_PHY,
            "Setting Half-Duplex interface.\n");
        val |= BCE_EMAC_MODE_HALF_DUPLEX;
    } else
        DBPRINT(sc, BCE_INFO_PHY,
            "Setting Full-Duplex interface.\n");

    REG_WR(sc, BCE_EMAC_MODE, val);

    /* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
    if (mii->mii_media_active & IFM_FLAG0) {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Enabling RX flow control.\n", __FUNCTION__);
        BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
    } else {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Disabling RX flow control.\n", __FUNCTION__);
        BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
    }

    if (mii->mii_media_active & IFM_FLAG1) {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Enabling TX flow control.\n", __FUNCTION__);
        BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
        sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
    } else {
        DBPRINT(sc, BCE_INFO_PHY,
            "%s(): Disabling TX flow control.\n", __FUNCTION__);
        BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
        sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
    }

    /* ToDo: Update watermarks in bce_init_rx_context(). */

    DBEXIT(BCE_VERBOSE_PHY);
}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* The code below requests/polls arbitration lock 2, which is the one      */
/* assigned to the driver; the other locks belong to firmware.             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 1803 /****************************************************************************/ 1804 static int 1805 bce_acquire_nvram_lock(struct bce_softc *sc) 1806 { 1807 u32 val; 1808 int j, rc = 0; 1809 1810 DBENTER(BCE_VERBOSE_NVRAM); 1811 1812 /* Request access to the flash interface. */ 1813 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1814 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1815 val = REG_RD(sc, BCE_NVM_SW_ARB); 1816 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1817 break; 1818 1819 DELAY(5); 1820 } 1821 1822 if (j >= NVRAM_TIMEOUT_COUNT) { 1823 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1824 rc = EBUSY; 1825 } 1826 1827 DBEXIT(BCE_VERBOSE_NVRAM); 1828 return (rc); 1829 } 1830 1831 1832 /****************************************************************************/ 1833 /* Release NVRAM lock. */ 1834 /* */ 1835 /* When the caller is finished accessing NVRAM the lock must be released. */ 1836 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1837 /* for use by the driver. */ 1838 /* */ 1839 /* Returns: */ 1840 /* 0 on success, positive value on failure. */ 1841 /****************************************************************************/ 1842 static int 1843 bce_release_nvram_lock(struct bce_softc *sc) 1844 { 1845 u32 val; 1846 int j, rc = 0; 1847 1848 DBENTER(BCE_VERBOSE_NVRAM); 1849 1850 /* 1851 * Relinquish nvram interface. 1852 */ 1853 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1854 1855 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1856 val = REG_RD(sc, BCE_NVM_SW_ARB); 1857 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1858 break; 1859 1860 DELAY(5); 1861 } 1862 1863 if (j >= NVRAM_TIMEOUT_COUNT) { 1864 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n"); 1865 rc = EBUSY; 1866 } 1867 1868 DBEXIT(BCE_VERBOSE_NVRAM); 1869 return (rc); 1870 } 1871 1872 1873 #ifdef BCE_NVRAM_WRITE_SUPPORT 1874 /****************************************************************************/ 1875 /* Enable NVRAM write access. 
*/ 1876 /* */ 1877 /* Before writing to NVRAM the caller must enable NVRAM writes. */ 1878 /* */ 1879 /* Returns: */ 1880 /* 0 on success, positive value on failure. */ 1881 /****************************************************************************/ 1882 static int 1883 bce_enable_nvram_write(struct bce_softc *sc) 1884 { 1885 u32 val; 1886 int rc = 0; 1887 1888 DBENTER(BCE_VERBOSE_NVRAM); 1889 1890 val = REG_RD(sc, BCE_MISC_CFG); 1891 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI); 1892 1893 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 1894 int j; 1895 1896 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1897 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT); 1898 1899 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1900 DELAY(5); 1901 1902 val = REG_RD(sc, BCE_NVM_COMMAND); 1903 if (val & BCE_NVM_COMMAND_DONE) 1904 break; 1905 } 1906 1907 if (j >= NVRAM_TIMEOUT_COUNT) { 1908 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n"); 1909 rc = EBUSY; 1910 } 1911 } 1912 1913 DBENTER(BCE_VERBOSE_NVRAM); 1914 return (rc); 1915 } 1916 1917 1918 /****************************************************************************/ 1919 /* Disable NVRAM write access. */ 1920 /* */ 1921 /* When the caller is finished writing to NVRAM write access must be */ 1922 /* disabled. */ 1923 /* */ 1924 /* Returns: */ 1925 /* Nothing. */ 1926 /****************************************************************************/ 1927 static void 1928 bce_disable_nvram_write(struct bce_softc *sc) 1929 { 1930 u32 val; 1931 1932 DBENTER(BCE_VERBOSE_NVRAM); 1933 1934 val = REG_RD(sc, BCE_MISC_CFG); 1935 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN); 1936 1937 DBEXIT(BCE_VERBOSE_NVRAM); 1938 1939 } 1940 #endif 1941 1942 1943 /****************************************************************************/ 1944 /* Enable NVRAM access. */ 1945 /* */ 1946 /* Before accessing NVRAM for read or write operations the caller must */ 1947 /* enabled NVRAM access. 
                                                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_NVRAM);

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val |
	    BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);

	DBEXIT(BCE_VERBOSE_NVRAM);
}


/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_NVRAM);

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val &
	    ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));

	DBEXIT(BCE_VERBOSE_NVRAM);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is      */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Buffered flash doesn't require an erase.
	 */
	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
		goto bce_nvram_erase_page_exit;

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	    BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		rc = EBUSY;
	}

bce_nvram_erase_page_exit:
	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM. The caller is assumed to have already     */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.     */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc,
    u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	DBENTER(BCE_EXTREME_NVRAM);

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used.
	 */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big-endian; convert before copy. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	DBEXIT(BCE_EXTREME_NVRAM);
	return(rc);
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM. The caller is assumed to have already      */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
    u32 cmd_flags)
{
	u32 cmd, val32;
	int j, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Build the command word.
	 */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 2182 /****************************************************************************/ 2183 static int 2184 bce_init_nvram(struct bce_softc *sc) 2185 { 2186 u32 val; 2187 int j, entry_count, rc = 0; 2188 struct flash_spec *flash; 2189 2190 DBENTER(BCE_VERBOSE_NVRAM); 2191 2192 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 2193 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 2194 sc->bce_flash_info = &flash_5709; 2195 goto bce_init_nvram_get_flash_size; 2196 } 2197 2198 /* Determine the selected interface. */ 2199 val = REG_RD(sc, BCE_NVM_CFG1); 2200 2201 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 2202 2203 /* 2204 * Flash reconfiguration is required to support additional 2205 * NVRAM devices not directly supported in hardware. 2206 * Check if the flash interface was reconfigured 2207 * by the bootcode. 2208 */ 2209 2210 if (val & 0x40000000) { 2211 /* Flash interface reconfigured by bootcode. */ 2212 2213 DBPRINT(sc,BCE_INFO_LOAD, 2214 "bce_init_nvram(): Flash WAS reconfigured.\n"); 2215 2216 for (j = 0, flash = &flash_table[0]; j < entry_count; 2217 j++, flash++) { 2218 if ((val & FLASH_BACKUP_STRAP_MASK) == 2219 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 2220 sc->bce_flash_info = flash; 2221 break; 2222 } 2223 } 2224 } else { 2225 /* Flash interface not yet reconfigured. */ 2226 u32 mask; 2227 2228 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n", 2229 __FUNCTION__); 2230 2231 if (val & (1 << 23)) 2232 mask = FLASH_BACKUP_STRAP_MASK; 2233 else 2234 mask = FLASH_STRAP_MASK; 2235 2236 /* Look for the matching NVRAM device configuration data. */ 2237 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { 2238 2239 /* Check if the device matches any of the known devices. */ 2240 if ((val & mask) == (flash->strapping & mask)) { 2241 /* Found a device match. */ 2242 sc->bce_flash_info = flash; 2243 2244 /* Request access to the flash interface. 
*/ 2245 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2246 return rc; 2247 2248 /* Reconfigure the flash interface. */ 2249 bce_enable_nvram_access(sc); 2250 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 2251 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 2252 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 2253 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 2254 bce_disable_nvram_access(sc); 2255 bce_release_nvram_lock(sc); 2256 2257 break; 2258 } 2259 } 2260 } 2261 2262 /* Check if a matching device was found. */ 2263 if (j == entry_count) { 2264 sc->bce_flash_info = NULL; 2265 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n", 2266 __FILE__, __LINE__); 2267 DBEXIT(BCE_VERBOSE_NVRAM); 2268 return (ENODEV); 2269 } 2270 2271 bce_init_nvram_get_flash_size: 2272 /* Write the flash config data to the shared memory interface. */ 2273 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2); 2274 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 2275 if (val) 2276 sc->bce_flash_size = val; 2277 else 2278 sc->bce_flash_size = sc->bce_flash_info->total_size; 2279 2280 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n", 2281 __FUNCTION__, sc->bce_flash_info->name, 2282 sc->bce_flash_info->total_size); 2283 2284 DBEXIT(BCE_VERBOSE_NVRAM); 2285 return rc; 2286 } 2287 2288 2289 /****************************************************************************/ 2290 /* Read an arbitrary range of data from NVRAM. */ 2291 /* */ 2292 /* Prepares the NVRAM interface for access and reads the requested data */ 2293 /* into the supplied buffer. */ 2294 /* */ 2295 /* Returns: */ 2296 /* 0 on success and the data read, positive value on failure. 
*/ 2297 /****************************************************************************/ 2298 static int 2299 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf, 2300 int buf_size) 2301 { 2302 int rc = 0; 2303 u32 cmd_flags, offset32, len32, extra; 2304 2305 DBENTER(BCE_VERBOSE_NVRAM); 2306 2307 if (buf_size == 0) 2308 goto bce_nvram_read_exit; 2309 2310 /* Request access to the flash interface. */ 2311 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2312 goto bce_nvram_read_exit; 2313 2314 /* Enable access to flash interface */ 2315 bce_enable_nvram_access(sc); 2316 2317 len32 = buf_size; 2318 offset32 = offset; 2319 extra = 0; 2320 2321 cmd_flags = 0; 2322 2323 if (offset32 & 3) { 2324 u8 buf[4]; 2325 u32 pre_len; 2326 2327 offset32 &= ~3; 2328 pre_len = 4 - (offset & 3); 2329 2330 if (pre_len >= len32) { 2331 pre_len = len32; 2332 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 2333 } 2334 else { 2335 cmd_flags = BCE_NVM_COMMAND_FIRST; 2336 } 2337 2338 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2339 2340 if (rc) 2341 return rc; 2342 2343 memcpy(ret_buf, buf + (offset & 3), pre_len); 2344 2345 offset32 += 4; 2346 ret_buf += pre_len; 2347 len32 -= pre_len; 2348 } 2349 2350 if (len32 & 3) { 2351 extra = 4 - (len32 & 3); 2352 len32 = (len32 + 4) & ~3; 2353 } 2354 2355 if (len32 == 4) { 2356 u8 buf[4]; 2357 2358 if (cmd_flags) 2359 cmd_flags = BCE_NVM_COMMAND_LAST; 2360 else 2361 cmd_flags = BCE_NVM_COMMAND_FIRST | 2362 BCE_NVM_COMMAND_LAST; 2363 2364 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2365 2366 memcpy(ret_buf, buf, 4 - extra); 2367 } 2368 else if (len32 > 0) { 2369 u8 buf[4]; 2370 2371 /* Read the first word. */ 2372 if (cmd_flags) 2373 cmd_flags = 0; 2374 else 2375 cmd_flags = BCE_NVM_COMMAND_FIRST; 2376 2377 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 2378 2379 /* Advance to the next dword. 
*/ 2380 offset32 += 4; 2381 ret_buf += 4; 2382 len32 -= 4; 2383 2384 while (len32 > 4 && rc == 0) { 2385 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 2386 2387 /* Advance to the next dword. */ 2388 offset32 += 4; 2389 ret_buf += 4; 2390 len32 -= 4; 2391 } 2392 2393 if (rc) 2394 goto bce_nvram_read_locked_exit; 2395 2396 cmd_flags = BCE_NVM_COMMAND_LAST; 2397 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 2398 2399 memcpy(ret_buf, buf, 4 - extra); 2400 } 2401 2402 bce_nvram_read_locked_exit: 2403 /* Disable access to flash interface and release the lock. */ 2404 bce_disable_nvram_access(sc); 2405 bce_release_nvram_lock(sc); 2406 2407 bce_nvram_read_exit: 2408 DBEXIT(BCE_VERBOSE_NVRAM); 2409 return rc; 2410 } 2411 2412 2413 #ifdef BCE_NVRAM_WRITE_SUPPORT 2414 /****************************************************************************/ 2415 /* Write an arbitrary range of data from NVRAM. */ 2416 /* */ 2417 /* Prepares the NVRAM interface for write access and writes the requested */ 2418 /* data from the supplied buffer. The caller is responsible for */ 2419 /* calculating any appropriate CRCs. */ 2420 /* */ 2421 /* Returns: */ 2422 /* 0 on success, positive value on failure. 
*/ 2423 /****************************************************************************/ 2424 static int 2425 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, 2426 int buf_size) 2427 { 2428 u32 written, offset32, len32; 2429 u8 *buf, start[4], end[4]; 2430 int rc = 0; 2431 int align_start, align_end; 2432 2433 DBENTER(BCE_VERBOSE_NVRAM); 2434 2435 buf = data_buf; 2436 offset32 = offset; 2437 len32 = buf_size; 2438 align_start = align_end = 0; 2439 2440 if ((align_start = (offset32 & 3))) { 2441 offset32 &= ~3; 2442 len32 += align_start; 2443 if ((rc = bce_nvram_read(sc, offset32, start, 4))) 2444 goto bce_nvram_write_exit; 2445 } 2446 2447 if (len32 & 3) { 2448 if ((len32 > 4) || !align_start) { 2449 align_end = 4 - (len32 & 3); 2450 len32 += align_end; 2451 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, 2452 end, 4))) { 2453 goto bce_nvram_write_exit; 2454 } 2455 } 2456 } 2457 2458 if (align_start || align_end) { 2459 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 2460 if (buf == 0) { 2461 rc = ENOMEM; 2462 goto bce_nvram_write_exit; 2463 } 2464 2465 if (align_start) { 2466 memcpy(buf, start, 4); 2467 } 2468 2469 if (align_end) { 2470 memcpy(buf + len32 - 4, end, 4); 2471 } 2472 memcpy(buf + align_start, data_buf, buf_size); 2473 } 2474 2475 written = 0; 2476 while ((written < len32) && (rc == 0)) { 2477 u32 page_start, page_end, data_start, data_end; 2478 u32 addr, cmd_flags; 2479 int i; 2480 u8 flash_buffer[264]; 2481 2482 /* Find the page_start addr */ 2483 page_start = offset32 + written; 2484 page_start -= (page_start % sc->bce_flash_info->page_size); 2485 /* Find the page_end addr */ 2486 page_end = page_start + sc->bce_flash_info->page_size; 2487 /* Find the data_start addr */ 2488 data_start = (written == 0) ? offset32 : page_start; 2489 /* Find the data_end addr */ 2490 data_end = (page_end > offset32 + len32) ? 2491 (offset32 + len32) : page_end; 2492 2493 /* Request access to the flash interface. 
*/ 2494 if ((rc = bce_acquire_nvram_lock(sc)) != 0) 2495 goto bce_nvram_write_exit; 2496 2497 /* Enable access to flash interface */ 2498 bce_enable_nvram_access(sc); 2499 2500 cmd_flags = BCE_NVM_COMMAND_FIRST; 2501 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2502 int j; 2503 2504 /* Read the whole page into the buffer 2505 * (non-buffer flash only) */ 2506 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { 2507 if (j == (sc->bce_flash_info->page_size - 4)) { 2508 cmd_flags |= BCE_NVM_COMMAND_LAST; 2509 } 2510 rc = bce_nvram_read_dword(sc, 2511 page_start + j, 2512 &flash_buffer[j], 2513 cmd_flags); 2514 2515 if (rc) 2516 goto bce_nvram_write_locked_exit; 2517 2518 cmd_flags = 0; 2519 } 2520 } 2521 2522 /* Enable writes to flash interface (unlock write-protect) */ 2523 if ((rc = bce_enable_nvram_write(sc)) != 0) 2524 goto bce_nvram_write_locked_exit; 2525 2526 /* Erase the page */ 2527 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) 2528 goto bce_nvram_write_locked_exit; 2529 2530 /* Re-enable the write again for the actual write */ 2531 bce_enable_nvram_write(sc); 2532 2533 /* Loop to write back the buffer data from page_start to 2534 * data_start */ 2535 i = 0; 2536 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2537 for (addr = page_start; addr < data_start; 2538 addr += 4, i += 4) { 2539 2540 rc = bce_nvram_write_dword(sc, addr, 2541 &flash_buffer[i], cmd_flags); 2542 2543 if (rc != 0) 2544 goto bce_nvram_write_locked_exit; 2545 2546 cmd_flags = 0; 2547 } 2548 } 2549 2550 /* Loop to write the new data from data_start to data_end */ 2551 for (addr = data_start; addr < data_end; addr += 4, i++) { 2552 if ((addr == page_end - 4) || 2553 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) && 2554 (addr == data_end - 4))) { 2555 2556 cmd_flags |= BCE_NVM_COMMAND_LAST; 2557 } 2558 rc = bce_nvram_write_dword(sc, addr, buf, 2559 cmd_flags); 2560 2561 if (rc != 0) 2562 goto bce_nvram_write_locked_exit; 2563 2564 cmd_flags = 0; 2565 buf += 4; 
2566 } 2567 2568 /* Loop to write back the buffer data from data_end 2569 * to page_end */ 2570 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { 2571 for (addr = data_end; addr < page_end; 2572 addr += 4, i += 4) { 2573 2574 if (addr == page_end-4) { 2575 cmd_flags = BCE_NVM_COMMAND_LAST; 2576 } 2577 rc = bce_nvram_write_dword(sc, addr, 2578 &flash_buffer[i], cmd_flags); 2579 2580 if (rc != 0) 2581 goto bce_nvram_write_locked_exit; 2582 2583 cmd_flags = 0; 2584 } 2585 } 2586 2587 /* Disable writes to flash interface (lock write-protect) */ 2588 bce_disable_nvram_write(sc); 2589 2590 /* Disable access to flash interface */ 2591 bce_disable_nvram_access(sc); 2592 bce_release_nvram_lock(sc); 2593 2594 /* Increment written */ 2595 written += data_end - data_start; 2596 } 2597 2598 goto bce_nvram_write_exit; 2599 2600 bce_nvram_write_locked_exit: 2601 bce_disable_nvram_write(sc); 2602 bce_disable_nvram_access(sc); 2603 bce_release_nvram_lock(sc); 2604 2605 bce_nvram_write_exit: 2606 if (align_start || align_end) 2607 free(buf, M_DEVBUF); 2608 2609 DBEXIT(BCE_VERBOSE_NVRAM); 2610 return (rc); 2611 } 2612 #endif /* BCE_NVRAM_WRITE_SUPPORT */ 2613 2614 2615 /****************************************************************************/ 2616 /* Verifies that NVRAM is accessible and contains valid data. */ 2617 /* */ 2618 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 2619 /* correct. */ 2620 /* */ 2621 /* Returns: */ 2622 /* 0 on success, positive value on failure. */ 2623 /****************************************************************************/ 2624 static int 2625 bce_nvram_test(struct bce_softc *sc) 2626 { 2627 u32 buf[BCE_NVRAM_SIZE / 4]; 2628 u8 *data = (u8 *) buf; 2629 int rc = 0; 2630 u32 magic, csum; 2631 2632 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2633 2634 /* 2635 * Check that the device NVRAM is valid by reading 2636 * the magic value at offset 0. 
2637 */ 2638 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) { 2639 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", 2640 __FILE__, __LINE__); 2641 goto bce_nvram_test_exit; 2642 } 2643 2644 /* 2645 * Verify that offset 0 of the NVRAM contains 2646 * a valid magic number. 2647 */ 2648 magic = bce_be32toh(buf[0]); 2649 if (magic != BCE_NVRAM_MAGIC) { 2650 rc = ENODEV; 2651 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! " 2652 "Expected: 0x%08X, Found: 0x%08X\n", 2653 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic); 2654 goto bce_nvram_test_exit; 2655 } 2656 2657 /* 2658 * Verify that the device NVRAM includes valid 2659 * configuration data. 2660 */ 2661 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) { 2662 BCE_PRINTF("%s(%d): Unable to read manufacturing " 2663 "Information from NVRAM!\n", __FILE__, __LINE__); 2664 goto bce_nvram_test_exit; 2665 } 2666 2667 csum = ether_crc32_le(data, 0x100); 2668 if (csum != BCE_CRC32_RESIDUAL) { 2669 rc = ENODEV; 2670 BCE_PRINTF("%s(%d): Invalid manufacturing information " 2671 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 2672 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); 2673 goto bce_nvram_test_exit; 2674 } 2675 2676 csum = ether_crc32_le(data + 0x100, 0x100); 2677 if (csum != BCE_CRC32_RESIDUAL) { 2678 rc = ENODEV; 2679 BCE_PRINTF("%s(%d): Invalid feature configuration " 2680 "information NVRAM CRC! Expected: 0x%08X, " 2681 "Found: 08%08X\n", __FILE__, __LINE__, 2682 BCE_CRC32_RESIDUAL, csum); 2683 } 2684 2685 bce_nvram_test_exit: 2686 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); 2687 return rc; 2688 } 2689 2690 2691 /****************************************************************************/ 2692 /* Identifies the current media type of the controller and sets the PHY */ 2693 /* address. */ 2694 /* */ 2695 /* Returns: */ 2696 /* Nothing. 
                                                                            */
/****************************************************************************/
static void
bce_get_media(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_PHY);

	/* Assume PHY address for copper controllers. */
	sc->bce_phy_addr = 1;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		/* NOTE(review): this "val" shadows the function-scope "val"
		 * above; the outer one is used later for shmem reads. */
 		u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
		u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		u32 strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
			    "for copper.\n");
			goto bce_get_media_exit;
		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
			    "for dual media.\n");
			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
			goto bce_get_media_exit;
		}

		/* Extract the media strapping, honoring a strap override. */
		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val &
			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else
			strap = (val &
			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

		/* The strap-to-media mapping differs by PCI function. */
		if (pci_get_function(sc->bce_dev) == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
				break;
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BCE_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
				break;
			}
		}

	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;

	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {

		sc->bce_flags |= BCE_NO_WOL_FLAG;

		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
			sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG;

		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			/* 5708S/09S/16S use a separate PHY for SerDes. */
			sc->bce_phy_addr = 2;

			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
				sc->bce_phy_flags |=
				    BCE_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb "
				    "capable adapter\n");
			}
		}
	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;

bce_get_media_exit:
	DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
	    "Using PHY address %d.\n", sc->bce_phy_addr);

	DBEXIT(BCE_VERBOSE_PHY);
}


/****************************************************************************/
/* Performs PHY initialization required before MII drivers access the       */
/* device.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_media(struct bce_softc *sc)
{
	if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		/*
		 * Configure 5709S/5716S PHYs to use traditional IEEE
		 * Clause 22 method. Otherwise we have no way to attach
		 * the PHY in mii(4) layer. PHY specific configuration
		 * is done in mii layer.
		 */

		/* Select auto-negotiation MMD of the PHY. */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

		/* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}


/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);

	/* Free, unmap, and destroy the status block. */
	if (sc->status_block != NULL) {
		bus_dmamem_free(
		    sc->status_tag,
		    sc->status_block,
		    sc->status_map);
		sc->status_block = NULL;
	}

	if (sc->status_map != NULL) {
		bus_dmamap_unload(
		    sc->status_tag,
		    sc->status_map);
		bus_dmamap_destroy(sc->status_tag,
		    sc->status_map);
		sc->status_map = NULL;
	}

	if (sc->status_tag != NULL) {
		bus_dma_tag_destroy(sc->status_tag);
		sc->status_tag = NULL;
	}


	/* Free, unmap, and destroy the statistics block. */
	if (sc->stats_block != NULL) {
		bus_dmamem_free(
		    sc->stats_tag,
		    sc->stats_block,
		    sc->stats_map);
		sc->stats_block = NULL;
	}

	if (sc->stats_map != NULL) {
		bus_dmamap_unload(
		    sc->stats_tag,
		    sc->stats_map);
		bus_dmamap_destroy(sc->stats_tag,
		    sc->stats_map);
		sc->stats_map = NULL;
	}

	if (sc->stats_tag != NULL) {
		bus_dma_tag_destroy(sc->stats_tag);
		sc->stats_tag = NULL;
	}


	/* Free, unmap and destroy all context memory pages.
	 * Context memory is only used on the 5709/5716 family. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		for (i = 0; i < sc->ctx_pages; i++ ) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamem_free(
				    sc->ctx_tag,
				    sc->ctx_block[i],
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}

			if (sc->ctx_map[i] != NULL) {
				bus_dmamap_unload(
				    sc->ctx_tag,
				    sc->ctx_map[i]);
				bus_dmamap_destroy(
				    sc->ctx_tag,
				    sc->ctx_map[i]);
				sc->ctx_map[i] = NULL;
			}
		}

		/* Destroy the context memory tag. */
		if (sc->ctx_tag != NULL) {
			bus_dma_tag_destroy(sc->ctx_tag);
			sc->ctx_tag = NULL;
		}
	}


	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL) {
			bus_dmamem_free(
			    sc->tx_bd_chain_tag,
			    sc->tx_bd_chain[i],
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
		}

		if (sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
			    sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamap_destroy(
			    sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX buffer descriptor tag. */
	if (sc->tx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
		sc->tx_bd_chain_tag = NULL;
	}


	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL) {
			bus_dmamem_free(
			    sc->rx_bd_chain_tag,
			    sc->rx_bd_chain[i],
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
		}

		if (sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
			    sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamap_destroy(
			    sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the RX buffer descriptor tag. */
	if (sc->rx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
		sc->rx_bd_chain_tag = NULL;
	}


#ifdef BCE_JUMBO_HDRSPLIT
	/* Free, unmap and destroy all page buffer descriptor chain pages. */
	for (i = 0; i < PG_PAGES; i++ ) {
		if (sc->pg_bd_chain[i] != NULL) {
			bus_dmamem_free(
			    sc->pg_bd_chain_tag,
			    sc->pg_bd_chain[i],
			    sc->pg_bd_chain_map[i]);
			sc->pg_bd_chain[i] = NULL;
		}

		if (sc->pg_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
			    sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i]);
			bus_dmamap_destroy(
			    sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i]);
			sc->pg_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the page buffer descriptor tag. */
	if (sc->pg_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
		sc->pg_bd_chain_tag = NULL;
	}
#endif


	/* Unload and destroy the TX mbuf maps. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
			sc->tx_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the TX mbuf tag. */
	if (sc->tx_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_mbuf_tag);
		sc->tx_mbuf_tag = NULL;
	}

	/* Unload and destroy the RX mbuf maps.
	 */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
			sc->rx_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the RX mbuf tag. */
	if (sc->rx_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->rx_mbuf_tag);
		sc->rx_mbuf_tag = NULL;
	}

#ifdef BCE_JUMBO_HDRSPLIT
	/* Unload and destroy the page mbuf maps. */
	for (i = 0; i < TOTAL_PG_BD; i++) {
		if (sc->pg_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->pg_mbuf_tag,
			    sc->pg_mbuf_map[i]);
			bus_dmamap_destroy(sc->pg_mbuf_tag,
			    sc->pg_mbuf_map[i]);
			sc->pg_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the page mbuf tag. */
	if (sc->pg_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->pg_mbuf_tag);
		sc->pg_mbuf_tag = NULL;
	}
#endif

	/* Destroy the parent tag */
	if (sc->parent_tag != NULL) {
		bus_dma_tag_destroy(sc->parent_tag);
		sc->parent_tag = NULL;
	}

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
}


/****************************************************************************/
/* Get DMA memory from the OS.                                              */
/*                                                                          */
/* Validates that the OS has provided DMA buffers in response to a          */
/* bus_dmamap_load() call and saves the physical address of those buffers.  */
/* When the callback is used the OS will return 0 for the mapping function  */
/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
/* failures back to the caller.                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3085 /****************************************************************************/ 3086 static void 3087 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3088 { 3089 bus_addr_t *busaddr = arg; 3090 3091 KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", 3092 __FUNCTION__, nseg)); 3093 /* Simulate a mapping failure. */ 3094 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), 3095 error = ENOMEM); 3096 3097 /* ToDo: How to increment debug sim_count variable here? */ 3098 3099 /* Check for an error and signal the caller that an error occurred. */ 3100 if (error) { 3101 *busaddr = 0; 3102 } else { 3103 *busaddr = segs->ds_addr; 3104 } 3105 3106 return; 3107 } 3108 3109 3110 /****************************************************************************/ 3111 /* Allocate any DMA memory needed by the driver. */ 3112 /* */ 3113 /* Allocates DMA memory needed for the various global structures needed by */ 3114 /* hardware. */ 3115 /* */ 3116 /* Memory alignment requirements: */ 3117 /* +-----------------+----------+----------+----------+----------+ */ 3118 /* | | 5706 | 5708 | 5709 | 5716 | */ 3119 /* +-----------------+----------+----------+----------+----------+ */ 3120 /* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3121 /* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 3122 /* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ 3123 /* |PG Buffers | none | none | none | none | */ 3124 /* |TX Buffers | none | none | none | none | */ 3125 /* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ 3126 /* |Context Memory | | | | | */ 3127 /* +-----------------+----------+----------+----------+----------+ */ 3128 /* */ 3129 /* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 3130 /* */ 3131 /* Returns: */ 3132 /* 0 for success, positive value for failure. 
*/ 3133 /****************************************************************************/ 3134 static int 3135 bce_dma_alloc(device_t dev) 3136 { 3137 struct bce_softc *sc; 3138 int i, error, rc = 0; 3139 bus_size_t max_size, max_seg_size; 3140 int max_segments; 3141 3142 sc = device_get_softc(dev); 3143 3144 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3145 3146 /* 3147 * Allocate the parent bus DMA tag appropriate for PCI. 3148 */ 3149 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY, 3150 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3151 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 3152 &sc->parent_tag)) { 3153 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", 3154 __FILE__, __LINE__); 3155 rc = ENOMEM; 3156 goto bce_dma_alloc_exit; 3157 } 3158 3159 /* 3160 * Create a DMA tag for the status block, allocate and clear the 3161 * memory, map the memory into DMA space, and fetch the physical 3162 * address of the block. 3163 */ 3164 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3165 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3166 NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 3167 0, NULL, NULL, &sc->status_tag)) { 3168 BCE_PRINTF("%s(%d): Could not allocate status block " 3169 "DMA tag!\n", __FILE__, __LINE__); 3170 rc = ENOMEM; 3171 goto bce_dma_alloc_exit; 3172 } 3173 3174 if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, 3175 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3176 &sc->status_map)) { 3177 BCE_PRINTF("%s(%d): Could not allocate status block " 3178 "DMA memory!\n", __FILE__, __LINE__); 3179 rc = ENOMEM; 3180 goto bce_dma_alloc_exit; 3181 } 3182 3183 error = bus_dmamap_load(sc->status_tag, sc->status_map, 3184 sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr, 3185 &sc->status_block_paddr, BUS_DMA_NOWAIT); 3186 3187 if (error) { 3188 BCE_PRINTF("%s(%d): Could not map status block " 3189 "DMA memory!\n", __FILE__, __LINE__); 3190 rc = ENOMEM; 3191 goto 
bce_dma_alloc_exit; 3192 } 3193 3194 DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n", 3195 __FUNCTION__, (uintmax_t) sc->status_block_paddr); 3196 3197 /* 3198 * Create a DMA tag for the statistics block, allocate and clear the 3199 * memory, map the memory into DMA space, and fetch the physical 3200 * address of the block. 3201 */ 3202 if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, 3203 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3204 NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 3205 0, NULL, NULL, &sc->stats_tag)) { 3206 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3207 "DMA tag!\n", __FILE__, __LINE__); 3208 rc = ENOMEM; 3209 goto bce_dma_alloc_exit; 3210 } 3211 3212 if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, 3213 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { 3214 BCE_PRINTF("%s(%d): Could not allocate statistics block " 3215 "DMA memory!\n", __FILE__, __LINE__); 3216 rc = ENOMEM; 3217 goto bce_dma_alloc_exit; 3218 } 3219 3220 error = bus_dmamap_load(sc->stats_tag, sc->stats_map, 3221 sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr, 3222 &sc->stats_block_paddr, BUS_DMA_NOWAIT); 3223 3224 if(error) { 3225 BCE_PRINTF("%s(%d): Could not map statistics block " 3226 "DMA memory!\n", __FILE__, __LINE__); 3227 rc = ENOMEM; 3228 goto bce_dma_alloc_exit; 3229 } 3230 3231 DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n", 3232 __FUNCTION__, (uintmax_t) sc->stats_block_paddr); 3233 3234 /* BCM5709 uses host memory as cache for context memory. */ 3235 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3236 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3237 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 3238 if (sc->ctx_pages == 0) 3239 sc->ctx_pages = 1; 3240 3241 DBRUNIF((sc->ctx_pages > 512), 3242 BCE_PRINTF("%s(%d): Too many CTX pages! 
%d > 512\n", 3243 __FILE__, __LINE__, sc->ctx_pages)); 3244 3245 /* 3246 * Create a DMA tag for the context pages, 3247 * allocate and clear the memory, map the 3248 * memory into DMA space, and fetch the 3249 * physical address of the block. 3250 */ 3251 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3252 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, 3253 NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 3254 0, NULL, NULL, &sc->ctx_tag)) { 3255 BCE_PRINTF("%s(%d): Could not allocate CTX " 3256 "DMA tag!\n", __FILE__, __LINE__); 3257 rc = ENOMEM; 3258 goto bce_dma_alloc_exit; 3259 } 3260 3261 for (i = 0; i < sc->ctx_pages; i++) { 3262 3263 if(bus_dmamem_alloc(sc->ctx_tag, 3264 (void **)&sc->ctx_block[i], 3265 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3266 &sc->ctx_map[i])) { 3267 BCE_PRINTF("%s(%d): Could not allocate CTX " 3268 "DMA memory!\n", __FILE__, __LINE__); 3269 rc = ENOMEM; 3270 goto bce_dma_alloc_exit; 3271 } 3272 3273 error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 3274 sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr, 3275 &sc->ctx_paddr[i], BUS_DMA_NOWAIT); 3276 3277 if (error) { 3278 BCE_PRINTF("%s(%d): Could not map CTX " 3279 "DMA memory!\n", __FILE__, __LINE__); 3280 rc = ENOMEM; 3281 goto bce_dma_alloc_exit; 3282 } 3283 3284 DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] " 3285 "= 0x%jX\n", __FUNCTION__, i, 3286 (uintmax_t) sc->ctx_paddr[i]); 3287 } 3288 } 3289 3290 /* 3291 * Create a DMA tag for the TX buffer descriptor chain, 3292 * allocate and clear the memory, and fetch the 3293 * physical address of the block. 
3294 */ 3295 if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, 3296 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3297 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0, 3298 NULL, NULL, &sc->tx_bd_chain_tag)) { 3299 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3300 "chain DMA tag!\n", __FILE__, __LINE__); 3301 rc = ENOMEM; 3302 goto bce_dma_alloc_exit; 3303 } 3304 3305 for (i = 0; i < TX_PAGES; i++) { 3306 3307 if(bus_dmamem_alloc(sc->tx_bd_chain_tag, 3308 (void **)&sc->tx_bd_chain[i], 3309 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3310 &sc->tx_bd_chain_map[i])) { 3311 BCE_PRINTF("%s(%d): Could not allocate TX descriptor " 3312 "chain DMA memory!\n", __FILE__, __LINE__); 3313 rc = ENOMEM; 3314 goto bce_dma_alloc_exit; 3315 } 3316 3317 error = bus_dmamap_load(sc->tx_bd_chain_tag, 3318 sc->tx_bd_chain_map[i], sc->tx_bd_chain[i], 3319 BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3320 &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3321 3322 if (error) { 3323 BCE_PRINTF("%s(%d): Could not map TX descriptor " 3324 "chain DMA memory!\n", __FILE__, __LINE__); 3325 rc = ENOMEM; 3326 goto bce_dma_alloc_exit; 3327 } 3328 3329 DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = " 3330 "0x%jX\n", __FUNCTION__, i, 3331 (uintmax_t) sc->tx_bd_chain_paddr[i]); 3332 } 3333 3334 /* Check the required size before mapping to conserve resources. */ 3335 if (bce_tso_enable) { 3336 max_size = BCE_TSO_MAX_SIZE; 3337 max_segments = BCE_MAX_SEGMENTS; 3338 max_seg_size = BCE_TSO_MAX_SEG_SIZE; 3339 } else { 3340 max_size = MCLBYTES * BCE_MAX_SEGMENTS; 3341 max_segments = BCE_MAX_SEGMENTS; 3342 max_seg_size = MCLBYTES; 3343 } 3344 3345 /* Create a DMA tag for TX mbufs. 
*/ 3346 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3347 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, 3348 max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) { 3349 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", 3350 __FILE__, __LINE__); 3351 rc = ENOMEM; 3352 goto bce_dma_alloc_exit; 3353 } 3354 3355 /* Create DMA maps for the TX mbufs clusters. */ 3356 for (i = 0; i < TOTAL_TX_BD; i++) { 3357 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 3358 &sc->tx_mbuf_map[i])) { 3359 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA " 3360 "map!\n", __FILE__, __LINE__); 3361 rc = ENOMEM; 3362 goto bce_dma_alloc_exit; 3363 } 3364 } 3365 3366 /* 3367 * Create a DMA tag for the RX buffer descriptor chain, 3368 * allocate and clear the memory, and fetch the physical 3369 * address of the blocks. 3370 */ 3371 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3372 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, 3373 sc->max_bus_addr, NULL, NULL, 3374 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 3375 0, NULL, NULL, &sc->rx_bd_chain_tag)) { 3376 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " 3377 "DMA tag!\n", __FILE__, __LINE__); 3378 rc = ENOMEM; 3379 goto bce_dma_alloc_exit; 3380 } 3381 3382 for (i = 0; i < RX_PAGES; i++) { 3383 3384 if (bus_dmamem_alloc(sc->rx_bd_chain_tag, 3385 (void **)&sc->rx_bd_chain[i], 3386 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3387 &sc->rx_bd_chain_map[i])) { 3388 BCE_PRINTF("%s(%d): Could not allocate RX descriptor " 3389 "chain DMA memory!\n", __FILE__, __LINE__); 3390 rc = ENOMEM; 3391 goto bce_dma_alloc_exit; 3392 } 3393 3394 error = bus_dmamap_load(sc->rx_bd_chain_tag, 3395 sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], 3396 BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr, 3397 &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3398 3399 if (error) { 3400 BCE_PRINTF("%s(%d): Could not map RX descriptor " 3401 "chain DMA memory!\n", __FILE__, __LINE__); 3402 rc = ENOMEM; 3403 goto 
bce_dma_alloc_exit; 3404 } 3405 3406 DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = " 3407 "0x%jX\n", __FUNCTION__, i, 3408 (uintmax_t) sc->rx_bd_chain_paddr[i]); 3409 } 3410 3411 /* 3412 * Create a DMA tag for RX mbufs. 3413 */ 3414 #ifdef BCE_JUMBO_HDRSPLIT 3415 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? 3416 MCLBYTES : sc->rx_bd_mbuf_alloc_size); 3417 #else 3418 max_size = max_seg_size = MJUM9BYTES; 3419 #endif 3420 max_segments = 1; 3421 3422 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag " 3423 "(max size = 0x%jX max segments = %d, max segment " 3424 "size = 0x%jX)\n", __FUNCTION__, (uintmax_t) max_size, 3425 max_segments, (uintmax_t) max_seg_size); 3426 3427 if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN, 3428 BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3429 max_size, max_segments, max_seg_size, 0, NULL, NULL, 3430 &sc->rx_mbuf_tag)) { 3431 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", 3432 __FILE__, __LINE__); 3433 rc = ENOMEM; 3434 goto bce_dma_alloc_exit; 3435 } 3436 3437 /* Create DMA maps for the RX mbuf clusters. */ 3438 for (i = 0; i < TOTAL_RX_BD; i++) { 3439 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, 3440 &sc->rx_mbuf_map[i])) { 3441 BCE_PRINTF("%s(%d): Unable to create RX mbuf " 3442 "DMA map!\n", __FILE__, __LINE__); 3443 rc = ENOMEM; 3444 goto bce_dma_alloc_exit; 3445 } 3446 } 3447 3448 #ifdef BCE_JUMBO_HDRSPLIT 3449 /* 3450 * Create a DMA tag for the page buffer descriptor chain, 3451 * allocate and clear the memory, and fetch the physical 3452 * address of the blocks. 
3453 */ 3454 if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 3455 BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, 3456 NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ, 3457 0, NULL, NULL, &sc->pg_bd_chain_tag)) { 3458 BCE_PRINTF("%s(%d): Could not allocate page descriptor " 3459 "chain DMA tag!\n", __FILE__, __LINE__); 3460 rc = ENOMEM; 3461 goto bce_dma_alloc_exit; 3462 } 3463 3464 for (i = 0; i < PG_PAGES; i++) { 3465 3466 if (bus_dmamem_alloc(sc->pg_bd_chain_tag, 3467 (void **)&sc->pg_bd_chain[i], 3468 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 3469 &sc->pg_bd_chain_map[i])) { 3470 BCE_PRINTF("%s(%d): Could not allocate page " 3471 "descriptor chain DMA memory!\n", 3472 __FILE__, __LINE__); 3473 rc = ENOMEM; 3474 goto bce_dma_alloc_exit; 3475 } 3476 3477 error = bus_dmamap_load(sc->pg_bd_chain_tag, 3478 sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], 3479 BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr, 3480 &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT); 3481 3482 if (error) { 3483 BCE_PRINTF("%s(%d): Could not map page descriptor " 3484 "chain DMA memory!\n", __FILE__, __LINE__); 3485 rc = ENOMEM; 3486 goto bce_dma_alloc_exit; 3487 } 3488 3489 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = " 3490 "0x%jX\n", __FUNCTION__, i, 3491 (uintmax_t) sc->pg_bd_chain_paddr[i]); 3492 } 3493 3494 /* 3495 * Create a DMA tag for page mbufs. 3496 */ 3497 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ? 3498 MCLBYTES : sc->pg_bd_mbuf_alloc_size); 3499 3500 if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 3501 sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, 3502 max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) { 3503 BCE_PRINTF("%s(%d): Could not allocate page mbuf " 3504 "DMA tag!\n", __FILE__, __LINE__); 3505 rc = ENOMEM; 3506 goto bce_dma_alloc_exit; 3507 } 3508 3509 /* Create DMA maps for the page mbuf clusters. 
*/ 3510 for (i = 0; i < TOTAL_PG_BD; i++) { 3511 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, 3512 &sc->pg_mbuf_map[i])) { 3513 BCE_PRINTF("%s(%d): Unable to create page mbuf " 3514 "DMA map!\n", __FILE__, __LINE__); 3515 rc = ENOMEM; 3516 goto bce_dma_alloc_exit; 3517 } 3518 } 3519 #endif 3520 3521 bce_dma_alloc_exit: 3522 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 3523 return(rc); 3524 } 3525 3526 3527 /****************************************************************************/ 3528 /* Release all resources used by the driver. */ 3529 /* */ 3530 /* Releases all resources acquired by the driver including interrupts, */ 3531 /* interrupt handler, interfaces, mutexes, and DMA memory. */ 3532 /* */ 3533 /* Returns: */ 3534 /* Nothing. */ 3535 /****************************************************************************/ 3536 static void 3537 bce_release_resources(struct bce_softc *sc) 3538 { 3539 device_t dev; 3540 3541 DBENTER(BCE_VERBOSE_RESET); 3542 3543 dev = sc->bce_dev; 3544 3545 bce_dma_free(sc); 3546 3547 if (sc->bce_intrhand != NULL) { 3548 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); 3549 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 3550 } 3551 3552 if (sc->bce_res_irq != NULL) { 3553 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); 3554 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid, 3555 sc->bce_res_irq); 3556 } 3557 3558 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) { 3559 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n"); 3560 pci_release_msi(dev); 3561 } 3562 3563 if (sc->bce_res_mem != NULL) { 3564 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); 3565 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 3566 sc->bce_res_mem); 3567 } 3568 3569 if (sc->bce_ifp != NULL) { 3570 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); 3571 if_free(sc->bce_ifp); 3572 } 3573 3574 if (mtx_initialized(&sc->bce_mtx)) 3575 BCE_LOCK_DESTROY(sc); 3576 3577 DBEXIT(BCE_VERBOSE_RESET); 
3578 } 3579 3580 3581 /****************************************************************************/ 3582 /* Firmware synchronization. */ 3583 /* */ 3584 /* Before performing certain events such as a chip reset, synchronize with */ 3585 /* the firmware first. */ 3586 /* */ 3587 /* Returns: */ 3588 /* 0 for success, positive value for failure. */ 3589 /****************************************************************************/ 3590 static int 3591 bce_fw_sync(struct bce_softc *sc, u32 msg_data) 3592 { 3593 int i, rc = 0; 3594 u32 val; 3595 3596 DBENTER(BCE_VERBOSE_RESET); 3597 3598 /* Don't waste any time if we've timed out before. */ 3599 if (sc->bce_fw_timed_out == TRUE) { 3600 rc = EBUSY; 3601 goto bce_fw_sync_exit; 3602 } 3603 3604 /* Increment the message sequence number. */ 3605 sc->bce_fw_wr_seq++; 3606 msg_data |= sc->bce_fw_wr_seq; 3607 3608 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = " 3609 "0x%08X\n", msg_data); 3610 3611 /* Send the message to the bootcode driver mailbox. */ 3612 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3613 3614 /* Wait for the bootcode to acknowledge the message. */ 3615 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 3616 /* Check for a response in the bootcode firmware mailbox. */ 3617 val = bce_shmem_rd(sc, BCE_FW_MB); 3618 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 3619 break; 3620 DELAY(1000); 3621 } 3622 3623 /* If we've timed out, tell bootcode that we've stopped waiting. */ 3624 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && 3625 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { 3626 3627 BCE_PRINTF("%s(%d): Firmware synchronization timeout! 
" 3628 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 3629 3630 msg_data &= ~BCE_DRV_MSG_CODE; 3631 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 3632 3633 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 3634 3635 sc->bce_fw_timed_out = TRUE; 3636 rc = EBUSY; 3637 } 3638 3639 bce_fw_sync_exit: 3640 DBEXIT(BCE_VERBOSE_RESET); 3641 return (rc); 3642 } 3643 3644 3645 /****************************************************************************/ 3646 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 3647 /* */ 3648 /* Returns: */ 3649 /* Nothing. */ 3650 /****************************************************************************/ 3651 static void 3652 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code, 3653 u32 rv2p_code_len, u32 rv2p_proc) 3654 { 3655 int i; 3656 u32 val; 3657 3658 DBENTER(BCE_VERBOSE_RESET); 3659 3660 /* Set the page size used by RV2P. */ 3661 if (rv2p_proc == RV2P_PROC2) { 3662 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); 3663 } 3664 3665 for (i = 0; i < rv2p_code_len; i += 8) { 3666 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 3667 rv2p_code++; 3668 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 3669 rv2p_code++; 3670 3671 if (rv2p_proc == RV2P_PROC1) { 3672 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 3673 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 3674 } 3675 else { 3676 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 3677 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 3678 } 3679 } 3680 3681 /* Reset the processor, un-stall is done later. */ 3682 if (rv2p_proc == RV2P_PROC1) { 3683 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 3684 } 3685 else { 3686 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 3687 } 3688 3689 DBEXIT(BCE_VERBOSE_RESET); 3690 } 3691 3692 3693 /****************************************************************************/ 3694 /* Load RISC processor firmware. 
*/ 3695 /* */ 3696 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 3697 /* associated with a particular processor. */ 3698 /* */ 3699 /* Returns: */ 3700 /* Nothing. */ 3701 /****************************************************************************/ 3702 static void 3703 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 3704 struct fw_info *fw) 3705 { 3706 u32 offset; 3707 3708 DBENTER(BCE_VERBOSE_RESET); 3709 3710 bce_halt_cpu(sc, cpu_reg); 3711 3712 /* Load the Text area. */ 3713 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 3714 if (fw->text) { 3715 int j; 3716 3717 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { 3718 REG_WR_IND(sc, offset, fw->text[j]); 3719 } 3720 } 3721 3722 /* Load the Data area. */ 3723 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 3724 if (fw->data) { 3725 int j; 3726 3727 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { 3728 REG_WR_IND(sc, offset, fw->data[j]); 3729 } 3730 } 3731 3732 /* Load the SBSS area. */ 3733 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 3734 if (fw->sbss) { 3735 int j; 3736 3737 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { 3738 REG_WR_IND(sc, offset, fw->sbss[j]); 3739 } 3740 } 3741 3742 /* Load the BSS area. */ 3743 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 3744 if (fw->bss) { 3745 int j; 3746 3747 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { 3748 REG_WR_IND(sc, offset, fw->bss[j]); 3749 } 3750 } 3751 3752 /* Load the Read-Only area. */ 3753 offset = cpu_reg->spad_base + 3754 (fw->rodata_addr - cpu_reg->mips_view_base); 3755 if (fw->rodata) { 3756 int j; 3757 3758 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { 3759 REG_WR_IND(sc, offset, fw->rodata[j]); 3760 } 3761 } 3762 3763 /* Clear the pre-fetch instruction and set the FW start address. 
*/ 3764 REG_WR_IND(sc, cpu_reg->inst, 0); 3765 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 3766 3767 DBEXIT(BCE_VERBOSE_RESET); 3768 } 3769 3770 3771 /****************************************************************************/ 3772 /* Starts the RISC processor. */ 3773 /* */ 3774 /* Assumes the CPU starting address has already been set. */ 3775 /* */ 3776 /* Returns: */ 3777 /* Nothing. */ 3778 /****************************************************************************/ 3779 static void 3780 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 3781 { 3782 u32 val; 3783 3784 DBENTER(BCE_VERBOSE_RESET); 3785 3786 /* Start the CPU. */ 3787 val = REG_RD_IND(sc, cpu_reg->mode); 3788 val &= ~cpu_reg->mode_value_halt; 3789 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3790 REG_WR_IND(sc, cpu_reg->mode, val); 3791 3792 DBEXIT(BCE_VERBOSE_RESET); 3793 } 3794 3795 3796 /****************************************************************************/ 3797 /* Halts the RISC processor. */ 3798 /* */ 3799 /* Returns: */ 3800 /* Nothing. */ 3801 /****************************************************************************/ 3802 static void 3803 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 3804 { 3805 u32 val; 3806 3807 DBENTER(BCE_VERBOSE_RESET); 3808 3809 /* Halt the CPU. */ 3810 val = REG_RD_IND(sc, cpu_reg->mode); 3811 val |= cpu_reg->mode_value_halt; 3812 REG_WR_IND(sc, cpu_reg->mode, val); 3813 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 3814 3815 DBEXIT(BCE_VERBOSE_RESET); 3816 } 3817 3818 3819 /****************************************************************************/ 3820 /* Initialize the RX CPU. */ 3821 /* */ 3822 /* Returns: */ 3823 /* Nothing. 
*/ 3824 /****************************************************************************/ 3825 static void 3826 bce_start_rxp_cpu(struct bce_softc *sc) 3827 { 3828 struct cpu_reg cpu_reg; 3829 3830 DBENTER(BCE_VERBOSE_RESET); 3831 3832 cpu_reg.mode = BCE_RXP_CPU_MODE; 3833 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 3834 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 3835 cpu_reg.state = BCE_RXP_CPU_STATE; 3836 cpu_reg.state_value_clear = 0xffffff; 3837 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 3838 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 3839 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 3840 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 3841 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 3842 cpu_reg.spad_base = BCE_RXP_SCRATCH; 3843 cpu_reg.mips_view_base = 0x8000000; 3844 3845 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n"); 3846 bce_start_cpu(sc, &cpu_reg); 3847 3848 DBEXIT(BCE_VERBOSE_RESET); 3849 } 3850 3851 3852 /****************************************************************************/ 3853 /* Initialize the RX CPU. */ 3854 /* */ 3855 /* Returns: */ 3856 /* Nothing. 
*/ 3857 /****************************************************************************/ 3858 static void 3859 bce_init_rxp_cpu(struct bce_softc *sc) 3860 { 3861 struct cpu_reg cpu_reg; 3862 struct fw_info fw; 3863 3864 DBENTER(BCE_VERBOSE_RESET); 3865 3866 cpu_reg.mode = BCE_RXP_CPU_MODE; 3867 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 3868 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 3869 cpu_reg.state = BCE_RXP_CPU_STATE; 3870 cpu_reg.state_value_clear = 0xffffff; 3871 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 3872 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 3873 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 3874 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 3875 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 3876 cpu_reg.spad_base = BCE_RXP_SCRATCH; 3877 cpu_reg.mips_view_base = 0x8000000; 3878 3879 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3880 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3881 fw.ver_major = bce_RXP_b09FwReleaseMajor; 3882 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 3883 fw.ver_fix = bce_RXP_b09FwReleaseFix; 3884 fw.start_addr = bce_RXP_b09FwStartAddr; 3885 3886 fw.text_addr = bce_RXP_b09FwTextAddr; 3887 fw.text_len = bce_RXP_b09FwTextLen; 3888 fw.text_index = 0; 3889 fw.text = bce_RXP_b09FwText; 3890 3891 fw.data_addr = bce_RXP_b09FwDataAddr; 3892 fw.data_len = bce_RXP_b09FwDataLen; 3893 fw.data_index = 0; 3894 fw.data = bce_RXP_b09FwData; 3895 3896 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 3897 fw.sbss_len = bce_RXP_b09FwSbssLen; 3898 fw.sbss_index = 0; 3899 fw.sbss = bce_RXP_b09FwSbss; 3900 3901 fw.bss_addr = bce_RXP_b09FwBssAddr; 3902 fw.bss_len = bce_RXP_b09FwBssLen; 3903 fw.bss_index = 0; 3904 fw.bss = bce_RXP_b09FwBss; 3905 3906 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 3907 fw.rodata_len = bce_RXP_b09FwRodataLen; 3908 fw.rodata_index = 0; 3909 fw.rodata = bce_RXP_b09FwRodata; 3910 } else { 3911 fw.ver_major = bce_RXP_b06FwReleaseMajor; 3912 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 3913 fw.ver_fix = bce_RXP_b06FwReleaseFix; 3914 fw.start_addr = 
bce_RXP_b06FwStartAddr; 3915 3916 fw.text_addr = bce_RXP_b06FwTextAddr; 3917 fw.text_len = bce_RXP_b06FwTextLen; 3918 fw.text_index = 0; 3919 fw.text = bce_RXP_b06FwText; 3920 3921 fw.data_addr = bce_RXP_b06FwDataAddr; 3922 fw.data_len = bce_RXP_b06FwDataLen; 3923 fw.data_index = 0; 3924 fw.data = bce_RXP_b06FwData; 3925 3926 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 3927 fw.sbss_len = bce_RXP_b06FwSbssLen; 3928 fw.sbss_index = 0; 3929 fw.sbss = bce_RXP_b06FwSbss; 3930 3931 fw.bss_addr = bce_RXP_b06FwBssAddr; 3932 fw.bss_len = bce_RXP_b06FwBssLen; 3933 fw.bss_index = 0; 3934 fw.bss = bce_RXP_b06FwBss; 3935 3936 fw.rodata_addr = bce_RXP_b06FwRodataAddr; 3937 fw.rodata_len = bce_RXP_b06FwRodataLen; 3938 fw.rodata_index = 0; 3939 fw.rodata = bce_RXP_b06FwRodata; 3940 } 3941 3942 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); 3943 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3944 3945 /* Delay RXP start until initialization is complete. */ 3946 3947 DBEXIT(BCE_VERBOSE_RESET); 3948 } 3949 3950 3951 /****************************************************************************/ 3952 /* Initialize the TX CPU. */ 3953 /* */ 3954 /* Returns: */ 3955 /* Nothing. 
*/ 3956 /****************************************************************************/ 3957 static void 3958 bce_init_txp_cpu(struct bce_softc *sc) 3959 { 3960 struct cpu_reg cpu_reg; 3961 struct fw_info fw; 3962 3963 DBENTER(BCE_VERBOSE_RESET); 3964 3965 cpu_reg.mode = BCE_TXP_CPU_MODE; 3966 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 3967 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 3968 cpu_reg.state = BCE_TXP_CPU_STATE; 3969 cpu_reg.state_value_clear = 0xffffff; 3970 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 3971 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 3972 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 3973 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 3974 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 3975 cpu_reg.spad_base = BCE_TXP_SCRATCH; 3976 cpu_reg.mips_view_base = 0x8000000; 3977 3978 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 3979 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 3980 fw.ver_major = bce_TXP_b09FwReleaseMajor; 3981 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 3982 fw.ver_fix = bce_TXP_b09FwReleaseFix; 3983 fw.start_addr = bce_TXP_b09FwStartAddr; 3984 3985 fw.text_addr = bce_TXP_b09FwTextAddr; 3986 fw.text_len = bce_TXP_b09FwTextLen; 3987 fw.text_index = 0; 3988 fw.text = bce_TXP_b09FwText; 3989 3990 fw.data_addr = bce_TXP_b09FwDataAddr; 3991 fw.data_len = bce_TXP_b09FwDataLen; 3992 fw.data_index = 0; 3993 fw.data = bce_TXP_b09FwData; 3994 3995 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 3996 fw.sbss_len = bce_TXP_b09FwSbssLen; 3997 fw.sbss_index = 0; 3998 fw.sbss = bce_TXP_b09FwSbss; 3999 4000 fw.bss_addr = bce_TXP_b09FwBssAddr; 4001 fw.bss_len = bce_TXP_b09FwBssLen; 4002 fw.bss_index = 0; 4003 fw.bss = bce_TXP_b09FwBss; 4004 4005 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 4006 fw.rodata_len = bce_TXP_b09FwRodataLen; 4007 fw.rodata_index = 0; 4008 fw.rodata = bce_TXP_b09FwRodata; 4009 } else { 4010 fw.ver_major = bce_TXP_b06FwReleaseMajor; 4011 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 4012 fw.ver_fix = bce_TXP_b06FwReleaseFix; 4013 fw.start_addr = 
bce_TXP_b06FwStartAddr; 4014 4015 fw.text_addr = bce_TXP_b06FwTextAddr; 4016 fw.text_len = bce_TXP_b06FwTextLen; 4017 fw.text_index = 0; 4018 fw.text = bce_TXP_b06FwText; 4019 4020 fw.data_addr = bce_TXP_b06FwDataAddr; 4021 fw.data_len = bce_TXP_b06FwDataLen; 4022 fw.data_index = 0; 4023 fw.data = bce_TXP_b06FwData; 4024 4025 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 4026 fw.sbss_len = bce_TXP_b06FwSbssLen; 4027 fw.sbss_index = 0; 4028 fw.sbss = bce_TXP_b06FwSbss; 4029 4030 fw.bss_addr = bce_TXP_b06FwBssAddr; 4031 fw.bss_len = bce_TXP_b06FwBssLen; 4032 fw.bss_index = 0; 4033 fw.bss = bce_TXP_b06FwBss; 4034 4035 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 4036 fw.rodata_len = bce_TXP_b06FwRodataLen; 4037 fw.rodata_index = 0; 4038 fw.rodata = bce_TXP_b06FwRodata; 4039 } 4040 4041 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); 4042 bce_load_cpu_fw(sc, &cpu_reg, &fw); 4043 bce_start_cpu(sc, &cpu_reg); 4044 4045 DBEXIT(BCE_VERBOSE_RESET); 4046 } 4047 4048 4049 /****************************************************************************/ 4050 /* Initialize the TPAT CPU. */ 4051 /* */ 4052 /* Returns: */ 4053 /* Nothing. 
*/
/****************************************************************************/
static void
bce_init_tpat_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	DBENTER(BCE_VERBOSE_RESET);

	/* Describe the TPAT processor's control registers. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the b09 firmware image for 5709/5716, b06 otherwise. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
		fw.start_addr = bce_TPAT_b09FwStartAddr;

		fw.text_addr = bce_TPAT_b09FwTextAddr;
		fw.text_len = bce_TPAT_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TPAT_b09FwText;

		fw.data_addr = bce_TPAT_b09FwDataAddr;
		fw.data_len = bce_TPAT_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TPAT_b09FwData;

		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
		fw.sbss_len = bce_TPAT_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TPAT_b09FwSbss;

		fw.bss_addr = bce_TPAT_b09FwBssAddr;
		fw.bss_len = bce_TPAT_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TPAT_b09FwBss;

		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
		fw.rodata_len = bce_TPAT_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TPAT_b09FwRodata;
	} else {
		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
		fw.start_addr = bce_TPAT_b06FwStartAddr;

		fw.text_addr = bce_TPAT_b06FwTextAddr;
		fw.text_len = bce_TPAT_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TPAT_b06FwText;

		fw.data_addr = bce_TPAT_b06FwDataAddr;
		fw.data_len = bce_TPAT_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TPAT_b06FwData;

		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
		fw.sbss_len = bce_TPAT_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TPAT_b06FwSbss;

		fw.bss_addr = bce_TPAT_b06FwBssAddr;
		fw.bss_len = bce_TPAT_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TPAT_b06FwBss;

		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
		fw.rodata_len = bce_TPAT_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TPAT_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);

	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the CP CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	DBENTER(BCE_VERBOSE_RESET);

	/* Describe the CP processor's control registers. */
	cpu_reg.mode = BCE_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the b09 firmware image for 5709/5716, b06 otherwise. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		fw.ver_major = bce_CP_b09FwReleaseMajor;
		fw.ver_minor = bce_CP_b09FwReleaseMinor;
		fw.ver_fix = bce_CP_b09FwReleaseFix;
		fw.start_addr = bce_CP_b09FwStartAddr;

		fw.text_addr = bce_CP_b09FwTextAddr;
		fw.text_len = bce_CP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b09FwText;

		fw.data_addr = bce_CP_b09FwDataAddr;
		fw.data_len = bce_CP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b09FwData;

		fw.sbss_addr = bce_CP_b09FwSbssAddr;
		fw.sbss_len = bce_CP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b09FwSbss;

		fw.bss_addr = bce_CP_b09FwBssAddr;
		fw.bss_len = bce_CP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b09FwBss;

		fw.rodata_addr = bce_CP_b09FwRodataAddr;
		fw.rodata_len = bce_CP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b09FwRodata;
	} else {
		fw.ver_major = bce_CP_b06FwReleaseMajor;
		fw.ver_minor = bce_CP_b06FwReleaseMinor;
		fw.ver_fix = bce_CP_b06FwReleaseFix;
		fw.start_addr = bce_CP_b06FwStartAddr;

		fw.text_addr = bce_CP_b06FwTextAddr;
		fw.text_len = bce_CP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b06FwText;

		fw.data_addr = bce_CP_b06FwDataAddr;
		fw.data_len = bce_CP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b06FwData;

		fw.sbss_addr = bce_CP_b06FwSbssAddr;
		fw.sbss_len = bce_CP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b06FwSbss;

		fw.bss_addr = bce_CP_b06FwBssAddr;
		fw.bss_len = bce_CP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b06FwBss;

		fw.rodata_addr = bce_CP_b06FwRodataAddr;
		fw.rodata_len = bce_CP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);

	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the COM CPU.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	DBENTER(BCE_VERBOSE_RESET);

	/* Describe the COM processor's control registers. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the b09 firmware image for 5709/5716, b06 otherwise. */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
		fw.ver_major = bce_COM_b09FwReleaseMajor;
		fw.ver_minor = bce_COM_b09FwReleaseMinor;
		fw.ver_fix = bce_COM_b09FwReleaseFix;
		fw.start_addr = bce_COM_b09FwStartAddr;

		fw.text_addr = bce_COM_b09FwTextAddr;
		fw.text_len = bce_COM_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b09FwText;

		fw.data_addr = bce_COM_b09FwDataAddr;
		fw.data_len = bce_COM_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b09FwData;

		fw.sbss_addr = bce_COM_b09FwSbssAddr;
		fw.sbss_len = bce_COM_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b09FwSbss;

		fw.bss_addr = bce_COM_b09FwBssAddr;
		fw.bss_len = bce_COM_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b09FwBss;

		fw.rodata_addr = bce_COM_b09FwRodataAddr;
		fw.rodata_len = bce_COM_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b09FwRodata;
	} else {
		fw.ver_major = bce_COM_b06FwReleaseMajor;
		fw.ver_minor = bce_COM_b06FwReleaseMinor;
		fw.ver_fix = bce_COM_b06FwReleaseFix;
		fw.start_addr = bce_COM_b06FwStartAddr;

		fw.text_addr = bce_COM_b06FwTextAddr;
		fw.text_len = bce_COM_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b06FwText;

		fw.data_addr = bce_COM_b06FwDataAddr;
		fw.data_len = bce_COM_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b06FwData;

		fw.sbss_addr = bce_COM_b06FwSbssAddr;
		fw.sbss_len = bce_COM_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b06FwSbss;

		fw.bss_addr = bce_COM_b06FwBssAddr;
		fw.bss_len = bce_COM_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b06FwBss;

		fw.rodata_addr = bce_COM_b06FwRodataAddr;
		fw.rodata_len = bce_COM_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);

	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 4350 /****************************************************************************/ 4351 static void 4352 bce_init_cpus(struct bce_softc *sc) 4353 { 4354 DBENTER(BCE_VERBOSE_RESET); 4355 4356 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4357 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4358 4359 if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) { 4360 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1, 4361 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1); 4362 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2, 4363 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2); 4364 } else { 4365 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, 4366 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1); 4367 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, 4368 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2); 4369 } 4370 4371 } else { 4372 bce_load_rv2p_fw(sc, bce_rv2p_proc1, 4373 sizeof(bce_rv2p_proc1), RV2P_PROC1); 4374 bce_load_rv2p_fw(sc, bce_rv2p_proc2, 4375 sizeof(bce_rv2p_proc2), RV2P_PROC2); 4376 } 4377 4378 bce_init_rxp_cpu(sc); 4379 bce_init_txp_cpu(sc); 4380 bce_init_tpat_cpu(sc); 4381 bce_init_com_cpu(sc); 4382 bce_init_cp_cpu(sc); 4383 4384 DBEXIT(BCE_VERBOSE_RESET); 4385 } 4386 4387 4388 /****************************************************************************/ 4389 /* Initialize context memory. */ 4390 /* */ 4391 /* Clears the memory associated with each Context ID (CID). */ 4392 /* */ 4393 /* Returns: */ 4394 /* Nothing. */ 4395 /****************************************************************************/ 4396 static int 4397 bce_init_ctx(struct bce_softc *sc) 4398 { 4399 u32 offset, val, vcid_addr; 4400 int i, j, rc, retry_cnt; 4401 4402 rc = 0; 4403 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4404 4405 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4406 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4407 retry_cnt = CTX_INIT_RETRY_COUNT; 4408 4409 DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n"); 4410 4411 /* 4412 * BCM5709 context memory may be cached 4413 * in host memory so prepare the host memory 4414 * for access. 
4415 */ 4416 val = BCE_CTX_COMMAND_ENABLED | 4417 BCE_CTX_COMMAND_MEM_INIT | (1 << 12); 4418 val |= (BCM_PAGE_BITS - 8) << 16; 4419 REG_WR(sc, BCE_CTX_COMMAND, val); 4420 4421 /* Wait for mem init command to complete. */ 4422 for (i = 0; i < retry_cnt; i++) { 4423 val = REG_RD(sc, BCE_CTX_COMMAND); 4424 if (!(val & BCE_CTX_COMMAND_MEM_INIT)) 4425 break; 4426 DELAY(2); 4427 } 4428 if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) { 4429 BCE_PRINTF("%s(): Context memory initialization failed!\n", 4430 __FUNCTION__); 4431 rc = EBUSY; 4432 goto init_ctx_fail; 4433 } 4434 4435 for (i = 0; i < sc->ctx_pages; i++) { 4436 /* Set the physical address of the context memory. */ 4437 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0, 4438 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) | 4439 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID); 4440 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1, 4441 BCE_ADDR_HI(sc->ctx_paddr[i])); 4442 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i | 4443 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4444 4445 /* Verify the context memory write was successful. */ 4446 for (j = 0; j < retry_cnt; j++) { 4447 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL); 4448 if ((val & 4449 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 4450 break; 4451 DELAY(5); 4452 } 4453 if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) { 4454 BCE_PRINTF("%s(): Failed to initialize " 4455 "context page %d!\n", __FUNCTION__, i); 4456 rc = EBUSY; 4457 goto init_ctx_fail; 4458 } 4459 } 4460 } else { 4461 4462 DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n"); 4463 4464 /* 4465 * For the 5706/5708, context memory is local to 4466 * the controller, so initialize the controller 4467 * context memory. 
4468 */ 4469 4470 vcid_addr = GET_CID_ADDR(96); 4471 while (vcid_addr) { 4472 4473 vcid_addr -= PHY_CTX_SIZE; 4474 4475 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 4476 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4477 4478 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) { 4479 CTX_WR(sc, 0x00, offset, 0); 4480 } 4481 4482 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 4483 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 4484 } 4485 4486 } 4487 init_ctx_fail: 4488 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); 4489 return (rc); 4490 } 4491 4492 4493 /****************************************************************************/ 4494 /* Fetch the permanent MAC address of the controller. */ 4495 /* */ 4496 /* Returns: */ 4497 /* Nothing. */ 4498 /****************************************************************************/ 4499 static void 4500 bce_get_mac_addr(struct bce_softc *sc) 4501 { 4502 u32 mac_lo = 0, mac_hi = 0; 4503 4504 DBENTER(BCE_VERBOSE_RESET); 4505 4506 /* 4507 * The NetXtreme II bootcode populates various NIC 4508 * power-on and runtime configuration items in a 4509 * shared memory area. The factory configured MAC 4510 * address is available from both NVRAM and the 4511 * shared memory area so we'll read the value from 4512 * shared memory for speed. 
4513 */ 4514 4515 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 4516 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 4517 4518 if ((mac_lo == 0) && (mac_hi == 0)) { 4519 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", 4520 __FILE__, __LINE__); 4521 } else { 4522 sc->eaddr[0] = (u_char)(mac_hi >> 8); 4523 sc->eaddr[1] = (u_char)(mac_hi >> 0); 4524 sc->eaddr[2] = (u_char)(mac_lo >> 24); 4525 sc->eaddr[3] = (u_char)(mac_lo >> 16); 4526 sc->eaddr[4] = (u_char)(mac_lo >> 8); 4527 sc->eaddr[5] = (u_char)(mac_lo >> 0); 4528 } 4529 4530 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet " 4531 "address = %6D\n", sc->eaddr, ":"); 4532 DBEXIT(BCE_VERBOSE_RESET); 4533 } 4534 4535 4536 /****************************************************************************/ 4537 /* Program the MAC address. */ 4538 /* */ 4539 /* Returns: */ 4540 /* Nothing. */ 4541 /****************************************************************************/ 4542 static void 4543 bce_set_mac_addr(struct bce_softc *sc) 4544 { 4545 u32 val; 4546 u8 *mac_addr = sc->eaddr; 4547 4548 /* ToDo: Add support for setting multiple MAC addresses. */ 4549 4550 DBENTER(BCE_VERBOSE_RESET); 4551 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = " 4552 "%6D\n", sc->eaddr, ":"); 4553 4554 val = (mac_addr[0] << 8) | mac_addr[1]; 4555 4556 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 4557 4558 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 4559 (mac_addr[4] << 8) | mac_addr[5]; 4560 4561 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 4562 4563 DBEXIT(BCE_VERBOSE_RESET); 4564 } 4565 4566 4567 /****************************************************************************/ 4568 /* Stop the controller. */ 4569 /* */ 4570 /* Returns: */ 4571 /* Nothing. 
*/ 4572 /****************************************************************************/ 4573 static void 4574 bce_stop(struct bce_softc *sc) 4575 { 4576 struct ifnet *ifp; 4577 4578 DBENTER(BCE_VERBOSE_RESET); 4579 4580 BCE_LOCK_ASSERT(sc); 4581 4582 ifp = sc->bce_ifp; 4583 4584 callout_stop(&sc->bce_tick_callout); 4585 4586 /* Disable the transmit/receive blocks. */ 4587 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 4588 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 4589 DELAY(20); 4590 4591 bce_disable_intr(sc); 4592 4593 /* Free RX buffers. */ 4594 #ifdef BCE_JUMBO_HDRSPLIT 4595 bce_free_pg_chain(sc); 4596 #endif 4597 bce_free_rx_chain(sc); 4598 4599 /* Free TX buffers. */ 4600 bce_free_tx_chain(sc); 4601 4602 sc->watchdog_timer = 0; 4603 4604 sc->bce_link_up = FALSE; 4605 4606 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4607 4608 DBEXIT(BCE_VERBOSE_RESET); 4609 } 4610 4611 4612 static int 4613 bce_reset(struct bce_softc *sc, u32 reset_code) 4614 { 4615 u32 val; 4616 int i, rc = 0; 4617 4618 DBENTER(BCE_VERBOSE_RESET); 4619 4620 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n", 4621 __FUNCTION__, reset_code); 4622 4623 /* Wait for pending PCI transactions to complete. */ 4624 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 4625 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4626 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4627 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4628 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4629 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 4630 DELAY(5); 4631 4632 /* Disable DMA */ 4633 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4634 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4635 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 4636 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 4637 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 4638 } 4639 4640 /* Assume bootcode is running. */ 4641 sc->bce_fw_timed_out = FALSE; 4642 sc->bce_drv_cardiac_arrest = FALSE; 4643 4644 /* Give the firmware a chance to prepare for the reset. 
*/ 4645 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 4646 if (rc) 4647 goto bce_reset_exit; 4648 4649 /* Set a firmware reminder that this is a soft reset. */ 4650 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC); 4651 4652 /* Dummy read to force the chip to complete all current transactions. */ 4653 val = REG_RD(sc, BCE_MISC_ID); 4654 4655 /* Chip reset. */ 4656 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4657 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4658 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); 4659 REG_RD(sc, BCE_MISC_COMMAND); 4660 DELAY(5); 4661 4662 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4663 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4664 4665 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); 4666 } else { 4667 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4668 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4669 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4670 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 4671 4672 /* Allow up to 30us for reset to complete. */ 4673 for (i = 0; i < 10; i++) { 4674 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 4675 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4676 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 4677 break; 4678 } 4679 DELAY(10); 4680 } 4681 4682 /* Check that reset completed successfully. */ 4683 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4684 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 4685 BCE_PRINTF("%s(%d): Reset failed!\n", 4686 __FILE__, __LINE__); 4687 rc = EBUSY; 4688 goto bce_reset_exit; 4689 } 4690 } 4691 4692 /* Make sure byte swapping is properly configured. */ 4693 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 4694 if (val != 0x01020304) { 4695 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n", 4696 __FILE__, __LINE__); 4697 rc = ENODEV; 4698 goto bce_reset_exit; 4699 } 4700 4701 /* Just completed a reset, assume that firmware is running again. 
*/ 4702 sc->bce_fw_timed_out = FALSE; 4703 sc->bce_drv_cardiac_arrest = FALSE; 4704 4705 /* Wait for the firmware to finish its initialization. */ 4706 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); 4707 if (rc) 4708 BCE_PRINTF("%s(%d): Firmware did not complete " 4709 "initialization!\n", __FILE__, __LINE__); 4710 4711 bce_reset_exit: 4712 DBEXIT(BCE_VERBOSE_RESET); 4713 return (rc); 4714 } 4715 4716 4717 static int 4718 bce_chipinit(struct bce_softc *sc) 4719 { 4720 u32 val; 4721 int rc = 0; 4722 4723 DBENTER(BCE_VERBOSE_RESET); 4724 4725 bce_disable_intr(sc); 4726 4727 /* 4728 * Initialize DMA byte/word swapping, configure the number of DMA 4729 * channels and PCI clock compensation delay. 4730 */ 4731 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | 4732 BCE_DMA_CONFIG_DATA_WORD_SWAP | 4733 #if BYTE_ORDER == BIG_ENDIAN 4734 BCE_DMA_CONFIG_CNTL_BYTE_SWAP | 4735 #endif 4736 BCE_DMA_CONFIG_CNTL_WORD_SWAP | 4737 DMA_READ_CHANS << 12 | 4738 DMA_WRITE_CHANS << 16; 4739 4740 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; 4741 4742 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) 4743 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; 4744 4745 /* 4746 * This setting resolves a problem observed on certain Intel PCI 4747 * chipsets that cannot handle multiple outstanding DMA operations. 4748 * See errata E9_5706A1_65. 4749 */ 4750 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 4751 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) && 4752 !(sc->bce_flags & BCE_PCIX_FLAG)) 4753 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; 4754 4755 REG_WR(sc, BCE_DMA_CONFIG, val); 4756 4757 /* Enable the RX_V2P and Context state machines before access. */ 4758 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4759 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 4760 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 4761 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 4762 4763 /* Initialize context mapping and zero out the quick contexts. 
*/ 4764 if ((rc = bce_init_ctx(sc)) != 0) 4765 goto bce_chipinit_exit; 4766 4767 /* Initialize the on-boards CPUs */ 4768 bce_init_cpus(sc); 4769 4770 /* Enable management frames (NC-SI) to flow to the MCP. */ 4771 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 4772 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 4773 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 4774 } 4775 4776 /* Prepare NVRAM for access. */ 4777 if ((rc = bce_init_nvram(sc)) != 0) 4778 goto bce_chipinit_exit; 4779 4780 /* Set the kernel bypass block size */ 4781 val = REG_RD(sc, BCE_MQ_CONFIG); 4782 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4783 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 4784 4785 /* Enable bins used on the 5709. */ 4786 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4787 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4788 val |= BCE_MQ_CONFIG_BIN_MQ_MODE; 4789 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) 4790 val |= BCE_MQ_CONFIG_HALT_DIS; 4791 } 4792 4793 REG_WR(sc, BCE_MQ_CONFIG, val); 4794 4795 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 4796 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); 4797 REG_WR(sc, BCE_MQ_KNL_WIND_END, val); 4798 4799 /* Set the page size and clear the RV2P processor stall bits. */ 4800 val = (BCM_PAGE_BITS - 8) << 24; 4801 REG_WR(sc, BCE_RV2P_CONFIG, val); 4802 4803 /* Configure page size. */ 4804 val = REG_RD(sc, BCE_TBDR_CONFIG); 4805 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; 4806 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 4807 REG_WR(sc, BCE_TBDR_CONFIG, val); 4808 4809 /* Set the perfect match control register to default. */ 4810 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); 4811 4812 bce_chipinit_exit: 4813 DBEXIT(BCE_VERBOSE_RESET); 4814 4815 return(rc); 4816 } 4817 4818 4819 /****************************************************************************/ 4820 /* Initialize the controller in preparation to send/receive traffic. */ 4821 /* */ 4822 /* Returns: */ 4823 /* 0 for success, positive value for failure. 
*/ 4824 /****************************************************************************/ 4825 static int 4826 bce_blockinit(struct bce_softc *sc) 4827 { 4828 u32 reg, val; 4829 int rc = 0; 4830 4831 DBENTER(BCE_VERBOSE_RESET); 4832 4833 /* Load the hardware default MAC address. */ 4834 bce_set_mac_addr(sc); 4835 4836 /* Set the Ethernet backoff seed value */ 4837 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + 4838 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) + 4839 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 4840 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 4841 4842 sc->last_status_idx = 0; 4843 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 4844 4845 /* Set up link change interrupt generation. */ 4846 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 4847 4848 /* Program the physical address of the status block. */ 4849 REG_WR(sc, BCE_HC_STATUS_ADDR_L, 4850 BCE_ADDR_LO(sc->status_block_paddr)); 4851 REG_WR(sc, BCE_HC_STATUS_ADDR_H, 4852 BCE_ADDR_HI(sc->status_block_paddr)); 4853 4854 /* Program the physical address of the statistics block. */ 4855 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 4856 BCE_ADDR_LO(sc->stats_block_paddr)); 4857 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 4858 BCE_ADDR_HI(sc->stats_block_paddr)); 4859 4860 /* Program various host coalescing parameters. 
*/ 4861 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 4862 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); 4863 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 4864 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); 4865 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 4866 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 4867 REG_WR(sc, BCE_HC_TX_TICKS, 4868 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 4869 REG_WR(sc, BCE_HC_RX_TICKS, 4870 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 4871 REG_WR(sc, BCE_HC_COM_TICKS, 4872 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 4873 REG_WR(sc, BCE_HC_CMD_TICKS, 4874 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 4875 REG_WR(sc, BCE_HC_STATS_TICKS, 4876 (sc->bce_stats_ticks & 0xffff00)); 4877 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 4878 4879 /* Configure the Host Coalescing block. */ 4880 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | 4881 BCE_HC_CONFIG_COLLECT_STATS; 4882 4883 #if 0 4884 /* ToDo: Add MSI-X support. */ 4885 if (sc->bce_flags & BCE_USING_MSIX_FLAG) { 4886 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) + 4887 BCE_HC_SB_CONFIG_1; 4888 4889 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); 4890 4891 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE | 4892 BCE_HC_SB_CONFIG_1_ONE_SHOT); 4893 4894 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 4895 (sc->tx_quick_cons_trip_int << 16) | 4896 sc->tx_quick_cons_trip); 4897 4898 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 4899 (sc->tx_ticks_int << 16) | sc->tx_ticks); 4900 4901 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 4902 } 4903 4904 /* 4905 * Tell the HC block to automatically set the 4906 * INT_MASK bit after an MSI/MSI-X interrupt 4907 * is generated so the driver doesn't have to. 4908 */ 4909 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG) 4910 val |= BCE_HC_CONFIG_ONE_SHOT; 4911 4912 /* Set the MSI-X status blocks to 128 byte boundaries. 
*/ 4913 if (sc->bce_flags & BCE_USING_MSIX_FLAG) 4914 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 4915 #endif 4916 4917 REG_WR(sc, BCE_HC_CONFIG, val); 4918 4919 /* Clear the internal statistics counters. */ 4920 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 4921 4922 /* Verify that bootcode is running. */ 4923 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 4924 4925 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control), 4926 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", 4927 __FILE__, __LINE__); 4928 reg = 0); 4929 4930 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 4931 BCE_DEV_INFO_SIGNATURE_MAGIC) { 4932 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, " 4933 "Expected: 08%08X\n", __FILE__, __LINE__, 4934 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), 4935 BCE_DEV_INFO_SIGNATURE_MAGIC); 4936 rc = ENODEV; 4937 goto bce_blockinit_exit; 4938 } 4939 4940 /* Enable DMA */ 4941 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4942 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 4943 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 4944 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 4945 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 4946 } 4947 4948 /* Allow bootcode to apply additional fixes before enabling MAC. */ 4949 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | 4950 BCE_DRV_MSG_CODE_RESET); 4951 4952 /* Enable link state change interrupt generation. */ 4953 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 4954 4955 /* Enable the RXP. */ 4956 bce_start_rxp_cpu(sc); 4957 4958 /* Disable management frames (NC-SI) from flowing to the MCP. */ 4959 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 4960 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 4961 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 4962 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 4963 } 4964 4965 /* Enable all remaining blocks in the MAC. 
*/ 4966 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 4967 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 4968 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4969 BCE_MISC_ENABLE_DEFAULT_XI); 4970 else 4971 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 4972 BCE_MISC_ENABLE_DEFAULT); 4973 4974 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 4975 DELAY(20); 4976 4977 /* Save the current host coalescing block settings. */ 4978 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 4979 4980 bce_blockinit_exit: 4981 DBEXIT(BCE_VERBOSE_RESET); 4982 4983 return (rc); 4984 } 4985 4986 4987 /****************************************************************************/ 4988 /* Encapsulate an mbuf into the rx_bd chain. */ 4989 /* */ 4990 /* Returns: */ 4991 /* 0 for success, positive value for failure. */ 4992 /****************************************************************************/ 4993 static int 4994 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, 4995 u16 *chain_prod, u32 *prod_bseq) 4996 { 4997 bus_dmamap_t map; 4998 bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; 4999 struct mbuf *m_new = NULL; 5000 struct rx_bd *rxbd; 5001 int nsegs, error, rc = 0; 5002 #ifdef BCE_DEBUG 5003 u16 debug_chain_prod = *chain_prod; 5004 #endif 5005 5006 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5007 5008 /* Make sure the inputs are valid. */ 5009 DBRUNIF((*chain_prod > MAX_RX_BD), 5010 BCE_PRINTF("%s(%d): RX producer out of range: " 5011 "0x%04X > 0x%04X\n", __FILE__, __LINE__, 5012 *chain_prod, (u16) MAX_RX_BD)); 5013 5014 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " 5015 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, 5016 *prod, *chain_prod, *prod_bseq); 5017 5018 /* Update some debug statistic counters */ 5019 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 5020 sc->rx_low_watermark = sc->free_rx_bd); 5021 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), 5022 sc->rx_empty_count++); 5023 5024 /* Check whether this is a new mbuf allocation. 
*/ 5025 if (m == NULL) { 5026 5027 /* Simulate an mbuf allocation failure. */ 5028 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), 5029 sc->mbuf_alloc_failed_count++; 5030 sc->mbuf_alloc_failed_sim_count++; 5031 rc = ENOBUFS; 5032 goto bce_get_rx_buf_exit); 5033 5034 /* This is a new mbuf allocation. */ 5035 #ifdef BCE_JUMBO_HDRSPLIT 5036 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 5037 #else 5038 m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, 5039 sc->rx_bd_mbuf_alloc_size); 5040 #endif 5041 5042 if (m_new == NULL) { 5043 sc->mbuf_alloc_failed_count++; 5044 rc = ENOBUFS; 5045 goto bce_get_rx_buf_exit; 5046 } 5047 5048 DBRUN(sc->debug_rx_mbuf_alloc++); 5049 } else { 5050 /* Reuse an existing mbuf. */ 5051 m_new = m; 5052 } 5053 5054 /* Make sure we have a valid packet header. */ 5055 M_ASSERTPKTHDR(m_new); 5056 5057 /* Initialize the mbuf size and pad if necessary for alignment. */ 5058 m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size; 5059 m_adj(m_new, sc->rx_bd_mbuf_align_pad); 5060 5061 /* ToDo: Consider calling m_fragment() to test error handling. */ 5062 5063 /* Map the mbuf cluster into device memory. */ 5064 map = sc->rx_mbuf_map[*chain_prod]; 5065 error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new, 5066 segs, &nsegs, BUS_DMA_NOWAIT); 5067 5068 /* Handle any mapping errors. */ 5069 if (error) { 5070 BCE_PRINTF("%s(%d): Error mapping mbuf into RX " 5071 "chain (%d)!\n", __FILE__, __LINE__, error); 5072 5073 sc->dma_map_addr_rx_failed_count++; 5074 m_freem(m_new); 5075 5076 DBRUN(sc->debug_rx_mbuf_alloc--); 5077 5078 rc = ENOBUFS; 5079 goto bce_get_rx_buf_exit; 5080 } 5081 5082 /* All mbufs must map to a single segment. */ 5083 KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", 5084 __FUNCTION__, nsegs)); 5085 5086 /* Setup the rx_bd for the segment. 
*/ 5087 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 5088 5089 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); 5090 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); 5091 rxbd->rx_bd_len = htole32(segs[0].ds_len); 5092 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); 5093 *prod_bseq += segs[0].ds_len; 5094 5095 /* Save the mbuf and update our counter. */ 5096 sc->rx_mbuf_ptr[*chain_prod] = m_new; 5097 sc->free_rx_bd -= nsegs; 5098 5099 DBRUNMSG(BCE_INSANE_RECV, 5100 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs)); 5101 5102 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " 5103 "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", 5104 __FUNCTION__, *prod, *chain_prod, *prod_bseq); 5105 5106 bce_get_rx_buf_exit: 5107 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5108 5109 return(rc); 5110 } 5111 5112 5113 #ifdef BCE_JUMBO_HDRSPLIT 5114 /****************************************************************************/ 5115 /* Encapsulate an mbuf cluster into the page chain. */ 5116 /* */ 5117 /* Returns: */ 5118 /* 0 for success, positive value for failure. */ 5119 /****************************************************************************/ 5120 static int 5121 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, 5122 u16 *prod_idx) 5123 { 5124 bus_dmamap_t map; 5125 bus_addr_t busaddr; 5126 struct mbuf *m_new = NULL; 5127 struct rx_bd *pgbd; 5128 int error, rc = 0; 5129 #ifdef BCE_DEBUG 5130 u16 debug_prod_idx = *prod_idx; 5131 #endif 5132 5133 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5134 5135 /* Make sure the inputs are valid. 
*/ 5136 DBRUNIF((*prod_idx > MAX_PG_BD), 5137 BCE_PRINTF("%s(%d): page producer out of range: " 5138 "0x%04X > 0x%04X\n", __FILE__, __LINE__, 5139 *prod_idx, (u16) MAX_PG_BD)); 5140 5141 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " 5142 "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx); 5143 5144 /* Update counters if we've hit a new low or run out of pages. */ 5145 DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark), 5146 sc->pg_low_watermark = sc->free_pg_bd); 5147 DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++); 5148 5149 /* Check whether this is a new mbuf allocation. */ 5150 if (m == NULL) { 5151 5152 /* Simulate an mbuf allocation failure. */ 5153 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), 5154 sc->mbuf_alloc_failed_count++; 5155 sc->mbuf_alloc_failed_sim_count++; 5156 rc = ENOBUFS; 5157 goto bce_get_pg_buf_exit); 5158 5159 /* This is a new mbuf allocation. */ 5160 m_new = m_getcl(M_DONTWAIT, MT_DATA, 0); 5161 if (m_new == NULL) { 5162 sc->mbuf_alloc_failed_count++; 5163 rc = ENOBUFS; 5164 goto bce_get_pg_buf_exit; 5165 } 5166 5167 DBRUN(sc->debug_pg_mbuf_alloc++); 5168 } else { 5169 /* Reuse an existing mbuf. */ 5170 m_new = m; 5171 m_new->m_data = m_new->m_ext.ext_buf; 5172 } 5173 5174 m_new->m_len = sc->pg_bd_mbuf_alloc_size; 5175 5176 /* ToDo: Consider calling m_fragment() to test error handling. */ 5177 5178 /* Map the mbuf cluster into device memory. */ 5179 map = sc->pg_mbuf_map[*prod_idx]; 5180 error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *), 5181 sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, 5182 &busaddr, BUS_DMA_NOWAIT); 5183 5184 /* Handle any mapping errors. */ 5185 if (error) { 5186 BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n", 5187 __FILE__, __LINE__); 5188 5189 m_freem(m_new); 5190 DBRUN(sc->debug_pg_mbuf_alloc--); 5191 5192 rc = ENOBUFS; 5193 goto bce_get_pg_buf_exit; 5194 } 5195 5196 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? 
*/ 5197 5198 /* 5199 * The page chain uses the same rx_bd data structure 5200 * as the receive chain but doesn't require a byte sequence (bseq). 5201 */ 5202 pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)]; 5203 5204 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr)); 5205 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr)); 5206 pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size); 5207 pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); 5208 5209 /* Save the mbuf and update our counter. */ 5210 sc->pg_mbuf_ptr[*prod_idx] = m_new; 5211 sc->free_pg_bd--; 5212 5213 DBRUNMSG(BCE_INSANE_RECV, 5214 bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1)); 5215 5216 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " 5217 "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx); 5218 5219 bce_get_pg_buf_exit: 5220 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); 5221 5222 return(rc); 5223 } 5224 #endif /* BCE_JUMBO_HDRSPLIT */ 5225 5226 5227 /****************************************************************************/ 5228 /* Initialize the TX context memory. */ 5229 /* */ 5230 /* Returns: */ 5231 /* Nothing */ 5232 /****************************************************************************/ 5233 static void 5234 bce_init_tx_context(struct bce_softc *sc) 5235 { 5236 u32 val; 5237 5238 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 5239 5240 /* Initialize the context ID for an L2 TX chain. */ 5241 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5242 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 5243 /* Set the CID type to support an L2 connection. */ 5244 val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | 5245 BCE_L2CTX_TX_TYPE_SIZE_L2_XI; 5246 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); 5247 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16); 5248 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5249 BCE_L2CTX_TX_CMD_TYPE_XI, val); 5250 5251 /* Point the hardware to the first page in the chain. 
*/ 5252 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 5253 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5254 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); 5255 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 5256 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5257 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); 5258 } else { 5259 /* Set the CID type to support an L2 connection. */ 5260 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 5261 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); 5262 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 5263 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); 5264 5265 /* Point the hardware to the first page in the chain. */ 5266 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 5267 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5268 BCE_L2CTX_TX_TBDR_BHADDR_HI, val); 5269 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 5270 CTX_WR(sc, GET_CID_ADDR(TX_CID), 5271 BCE_L2CTX_TX_TBDR_BHADDR_LO, val); 5272 } 5273 5274 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); 5275 } 5276 5277 5278 /****************************************************************************/ 5279 /* Allocate memory and initialize the TX data structures. */ 5280 /* */ 5281 /* Returns: */ 5282 /* 0 for success, positive value for failure. */ 5283 /****************************************************************************/ 5284 static int 5285 bce_init_tx_chain(struct bce_softc *sc) 5286 { 5287 struct tx_bd *txbd; 5288 int i, rc = 0; 5289 5290 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); 5291 5292 /* Set the initial TX producer/consumer indices. */ 5293 sc->tx_prod = 0; 5294 sc->tx_cons = 0; 5295 sc->tx_prod_bseq = 0; 5296 sc->used_tx_bd = 0; 5297 sc->max_tx_bd = USABLE_TX_BD; 5298 DBRUN(sc->tx_hi_watermark = 0); 5299 DBRUN(sc->tx_full_count = 0); 5300 5301 /* 5302 * The NetXtreme II supports a linked-list structre called 5303 * a Buffer Descriptor Chain (or BD chain). 
A BD chain 5304 * consists of a series of 1 or more chain pages, each of which 5305 * consists of a fixed number of BD entries. 5306 * The last BD entry on each page is a pointer to the next page 5307 * in the chain, and the last pointer in the BD chain 5308 * points back to the beginning of the chain. 5309 */ 5310 5311 /* Set the TX next pointer chain entries. */ 5312 for (i = 0; i < TX_PAGES; i++) { 5313 int j; 5314 5315 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 5316 5317 /* Check if we've reached the last page. */ 5318 if (i == (TX_PAGES - 1)) 5319 j = 0; 5320 else 5321 j = i + 1; 5322 5323 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); 5324 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); 5325 } 5326 5327 bce_init_tx_context(sc); 5328 5329 DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD)); 5330 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); 5331 5332 return(rc); 5333 } 5334 5335 5336 /****************************************************************************/ 5337 /* Free memory and clear the TX data structures. */ 5338 /* */ 5339 /* Returns: */ 5340 /* Nothing. */ 5341 /****************************************************************************/ 5342 static void 5343 bce_free_tx_chain(struct bce_softc *sc) 5344 { 5345 int i; 5346 5347 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); 5348 5349 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 5350 for (i = 0; i < TOTAL_TX_BD; i++) { 5351 if (sc->tx_mbuf_ptr[i] != NULL) { 5352 if (sc->tx_mbuf_map[i] != NULL) 5353 bus_dmamap_sync(sc->tx_mbuf_tag, 5354 sc->tx_mbuf_map[i], 5355 BUS_DMASYNC_POSTWRITE); 5356 m_freem(sc->tx_mbuf_ptr[i]); 5357 sc->tx_mbuf_ptr[i] = NULL; 5358 DBRUN(sc->debug_tx_mbuf_alloc--); 5359 } 5360 } 5361 5362 /* Clear each TX chain page. 
*/ 5363 for (i = 0; i < TX_PAGES; i++) 5364 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); 5365 5366 sc->used_tx_bd = 0; 5367 5368 /* Check if we lost any mbufs in the process. */ 5369 DBRUNIF((sc->debug_tx_mbuf_alloc), 5370 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs " 5371 "from tx chain!\n", __FILE__, __LINE__, 5372 sc->debug_tx_mbuf_alloc)); 5373 5374 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); 5375 } 5376 5377 5378 /****************************************************************************/ 5379 /* Initialize the RX context memory. */ 5380 /* */ 5381 /* Returns: */ 5382 /* Nothing */ 5383 /****************************************************************************/ 5384 static void 5385 bce_init_rx_context(struct bce_softc *sc) 5386 { 5387 u32 val; 5388 5389 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); 5390 5391 /* Init the type, size, and BD cache levels for the RX context. */ 5392 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 5393 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | 5394 (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT); 5395 5396 /* 5397 * Set the level for generating pause frames 5398 * when the number of available rx_bd's gets 5399 * too low (the low watermark) and the level 5400 * when pause frames can be stopped (the high 5401 * watermark). 
5402 */ 5403 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5404 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 5405 u32 lo_water, hi_water; 5406 5407 if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) { 5408 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT; 5409 } else { 5410 lo_water = 0; 5411 } 5412 5413 if (lo_water >= USABLE_RX_BD) { 5414 lo_water = 0; 5415 } 5416 5417 hi_water = USABLE_RX_BD / 4; 5418 5419 if (hi_water <= lo_water) { 5420 lo_water = 0; 5421 } 5422 5423 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE; 5424 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE; 5425 5426 if (hi_water > 0xf) 5427 hi_water = 0xf; 5428 else if (hi_water == 0) 5429 lo_water = 0; 5430 5431 val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) | 5432 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT); 5433 } 5434 5435 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val); 5436 5437 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 5438 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5439 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 5440 val = REG_RD(sc, BCE_MQ_MAP_L2_5); 5441 REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM); 5442 } 5443 5444 /* Point the hardware to the first page in the chain. */ 5445 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]); 5446 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val); 5447 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]); 5448 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val); 5449 5450 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); 5451 } 5452 5453 5454 /****************************************************************************/ 5455 /* Allocate memory and initialize the RX data structures. */ 5456 /* */ 5457 /* Returns: */ 5458 /* 0 for success, positive value for failure. 
*/ 5459 /****************************************************************************/ 5460 static int 5461 bce_init_rx_chain(struct bce_softc *sc) 5462 { 5463 struct rx_bd *rxbd; 5464 int i, rc = 0; 5465 5466 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5467 BCE_VERBOSE_CTX); 5468 5469 /* Initialize the RX producer and consumer indices. */ 5470 sc->rx_prod = 0; 5471 sc->rx_cons = 0; 5472 sc->rx_prod_bseq = 0; 5473 sc->free_rx_bd = USABLE_RX_BD; 5474 sc->max_rx_bd = USABLE_RX_BD; 5475 5476 /* Initialize the RX next pointer chain entries. */ 5477 for (i = 0; i < RX_PAGES; i++) { 5478 int j; 5479 5480 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 5481 5482 /* Check if we've reached the last page. */ 5483 if (i == (RX_PAGES - 1)) 5484 j = 0; 5485 else 5486 j = i + 1; 5487 5488 /* Setup the chain page pointers. */ 5489 rxbd->rx_bd_haddr_hi = 5490 htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j])); 5491 rxbd->rx_bd_haddr_lo = 5492 htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j])); 5493 } 5494 5495 /* Fill up the RX chain. */ 5496 bce_fill_rx_chain(sc); 5497 5498 DBRUN(sc->rx_low_watermark = USABLE_RX_BD); 5499 DBRUN(sc->rx_empty_count = 0); 5500 for (i = 0; i < RX_PAGES; i++) { 5501 bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], 5502 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 5503 } 5504 5505 bce_init_rx_context(sc); 5506 5507 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD)); 5508 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5509 BCE_VERBOSE_CTX); 5510 5511 /* ToDo: Are there possible failure modes here? */ 5512 5513 return(rc); 5514 } 5515 5516 5517 /****************************************************************************/ 5518 /* Add mbufs to the RX chain until its full or an mbuf allocation error */ 5519 /* occurs. 
*/ 5520 /* */ 5521 /* Returns: */ 5522 /* Nothing */ 5523 /****************************************************************************/ 5524 static void 5525 bce_fill_rx_chain(struct bce_softc *sc) 5526 { 5527 u16 prod, prod_idx; 5528 u32 prod_bseq; 5529 5530 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5531 BCE_VERBOSE_CTX); 5532 5533 /* Get the RX chain producer indices. */ 5534 prod = sc->rx_prod; 5535 prod_bseq = sc->rx_prod_bseq; 5536 5537 /* Keep filling the RX chain until it's full. */ 5538 while (sc->free_rx_bd > 0) { 5539 prod_idx = RX_CHAIN_IDX(prod); 5540 if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) { 5541 /* Bail out if we can't add an mbuf to the chain. */ 5542 break; 5543 } 5544 prod = NEXT_RX_BD(prod); 5545 } 5546 5547 /* Save the RX chain producer indices. */ 5548 sc->rx_prod = prod; 5549 sc->rx_prod_bseq = prod_bseq; 5550 5551 /* We should never end up pointing to a next page pointer. */ 5552 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 5553 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n", 5554 __FUNCTION__, sc->rx_prod)); 5555 5556 /* Write the mailbox and tell the chip about the waiting rx_bd's. */ 5557 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + 5558 BCE_L2MQ_RX_HOST_BDIDX, sc->rx_prod); 5559 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + 5560 BCE_L2MQ_RX_HOST_BSEQ, sc->rx_prod_bseq); 5561 5562 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5563 BCE_VERBOSE_CTX); 5564 } 5565 5566 5567 /****************************************************************************/ 5568 /* Free memory and clear the RX data structures. */ 5569 /* */ 5570 /* Returns: */ 5571 /* Nothing. */ 5572 /****************************************************************************/ 5573 static void 5574 bce_free_rx_chain(struct bce_softc *sc) 5575 { 5576 int i; 5577 5578 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5579 5580 /* Free any mbufs still in the RX mbuf chain. 
*/ 5581 for (i = 0; i < TOTAL_RX_BD; i++) { 5582 if (sc->rx_mbuf_ptr[i] != NULL) { 5583 if (sc->rx_mbuf_map[i] != NULL) 5584 bus_dmamap_sync(sc->rx_mbuf_tag, 5585 sc->rx_mbuf_map[i], 5586 BUS_DMASYNC_POSTREAD); 5587 m_freem(sc->rx_mbuf_ptr[i]); 5588 sc->rx_mbuf_ptr[i] = NULL; 5589 DBRUN(sc->debug_rx_mbuf_alloc--); 5590 } 5591 } 5592 5593 /* Clear each RX chain page. */ 5594 for (i = 0; i < RX_PAGES; i++) 5595 if (sc->rx_bd_chain[i] != NULL) { 5596 bzero((char *)sc->rx_bd_chain[i], 5597 BCE_RX_CHAIN_PAGE_SZ); 5598 } 5599 5600 sc->free_rx_bd = sc->max_rx_bd; 5601 5602 /* Check if we lost any mbufs in the process. */ 5603 DBRUNIF((sc->debug_rx_mbuf_alloc), 5604 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n", 5605 __FUNCTION__, sc->debug_rx_mbuf_alloc)); 5606 5607 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5608 } 5609 5610 5611 #ifdef BCE_JUMBO_HDRSPLIT 5612 /****************************************************************************/ 5613 /* Allocate memory and initialize the page data structures. */ 5614 /* Assumes that bce_init_rx_chain() has not already been called. */ 5615 /* */ 5616 /* Returns: */ 5617 /* 0 for success, positive value for failure. */ 5618 /****************************************************************************/ 5619 static int 5620 bce_init_pg_chain(struct bce_softc *sc) 5621 { 5622 struct rx_bd *pgbd; 5623 int i, rc = 0; 5624 u32 val; 5625 5626 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5627 BCE_VERBOSE_CTX); 5628 5629 /* Initialize the page producer and consumer indices. */ 5630 sc->pg_prod = 0; 5631 sc->pg_cons = 0; 5632 sc->free_pg_bd = USABLE_PG_BD; 5633 sc->max_pg_bd = USABLE_PG_BD; 5634 DBRUN(sc->pg_low_watermark = sc->max_pg_bd); 5635 DBRUN(sc->pg_empty_count = 0); 5636 5637 /* Initialize the page next pointer chain entries. 
*/ 5638 for (i = 0; i < PG_PAGES; i++) { 5639 int j; 5640 5641 pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE]; 5642 5643 /* Check if we've reached the last page. */ 5644 if (i == (PG_PAGES - 1)) 5645 j = 0; 5646 else 5647 j = i + 1; 5648 5649 /* Setup the chain page pointers. */ 5650 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j])); 5651 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j])); 5652 } 5653 5654 /* Setup the MQ BIN mapping for host_pg_bidx. */ 5655 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 5656 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 5657 REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT); 5658 5659 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0); 5660 5661 /* Configure the rx_bd and page chain mbuf cluster size. */ 5662 val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size; 5663 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val); 5664 5665 /* Configure the context reserved for jumbo support. */ 5666 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY, 5667 BCE_L2CTX_RX_RBDC_JUMBO_KEY); 5668 5669 /* Point the hardware to the first page in the page chain. */ 5670 val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]); 5671 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val); 5672 val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]); 5673 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val); 5674 5675 /* Fill up the page chain. 
*/ 5676 bce_fill_pg_chain(sc); 5677 5678 for (i = 0; i < PG_PAGES; i++) { 5679 bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], 5680 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 5681 } 5682 5683 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD)); 5684 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | 5685 BCE_VERBOSE_CTX); 5686 return(rc); 5687 } 5688 5689 5690 /****************************************************************************/ 5691 /* Add mbufs to the page chain until its full or an mbuf allocation error */ 5692 /* occurs. */ 5693 /* */ 5694 /* Returns: */ 5695 /* Nothing */ 5696 /****************************************************************************/ 5697 static void 5698 bce_fill_pg_chain(struct bce_softc *sc) 5699 { 5700 u16 prod, prod_idx; 5701 5702 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5703 BCE_VERBOSE_CTX); 5704 5705 /* Get the page chain prodcuer index. */ 5706 prod = sc->pg_prod; 5707 5708 /* Keep filling the page chain until it's full. */ 5709 while (sc->free_pg_bd > 0) { 5710 prod_idx = PG_CHAIN_IDX(prod); 5711 if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) { 5712 /* Bail out if we can't add an mbuf to the chain. */ 5713 break; 5714 } 5715 prod = NEXT_PG_BD(prod); 5716 } 5717 5718 /* Save the page chain producer index. */ 5719 sc->pg_prod = prod; 5720 5721 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), 5722 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n", 5723 __FUNCTION__, sc->pg_prod)); 5724 5725 /* 5726 * Write the mailbox and tell the chip about 5727 * the new rx_bd's in the page chain. 5728 */ 5729 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + 5730 BCE_L2MQ_RX_HOST_PG_BDIDX, sc->pg_prod); 5731 5732 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | 5733 BCE_VERBOSE_CTX); 5734 } 5735 5736 5737 /****************************************************************************/ 5738 /* Free memory and clear the RX data structures. 
*/ 5739 /* */ 5740 /* Returns: */ 5741 /* Nothing. */ 5742 /****************************************************************************/ 5743 static void 5744 bce_free_pg_chain(struct bce_softc *sc) 5745 { 5746 int i; 5747 5748 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5749 5750 /* Free any mbufs still in the mbuf page chain. */ 5751 for (i = 0; i < TOTAL_PG_BD; i++) { 5752 if (sc->pg_mbuf_ptr[i] != NULL) { 5753 if (sc->pg_mbuf_map[i] != NULL) 5754 bus_dmamap_sync(sc->pg_mbuf_tag, 5755 sc->pg_mbuf_map[i], 5756 BUS_DMASYNC_POSTREAD); 5757 m_freem(sc->pg_mbuf_ptr[i]); 5758 sc->pg_mbuf_ptr[i] = NULL; 5759 DBRUN(sc->debug_pg_mbuf_alloc--); 5760 } 5761 } 5762 5763 /* Clear each page chain pages. */ 5764 for (i = 0; i < PG_PAGES; i++) 5765 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ); 5766 5767 sc->free_pg_bd = sc->max_pg_bd; 5768 5769 /* Check if we lost any mbufs in the process. */ 5770 DBRUNIF((sc->debug_pg_mbuf_alloc), 5771 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n", 5772 __FUNCTION__, sc->debug_pg_mbuf_alloc)); 5773 5774 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); 5775 } 5776 #endif /* BCE_JUMBO_HDRSPLIT */ 5777 5778 5779 /****************************************************************************/ 5780 /* Set media options. */ 5781 /* */ 5782 /* Returns: */ 5783 /* 0 for success, positive value for failure. */ 5784 /****************************************************************************/ 5785 static int 5786 bce_ifmedia_upd(struct ifnet *ifp) 5787 { 5788 struct bce_softc *sc = ifp->if_softc; 5789 int error; 5790 5791 DBENTER(BCE_VERBOSE); 5792 5793 BCE_LOCK(sc); 5794 error = bce_ifmedia_upd_locked(ifp); 5795 BCE_UNLOCK(sc); 5796 5797 DBEXIT(BCE_VERBOSE); 5798 return (error); 5799 } 5800 5801 5802 /****************************************************************************/ 5803 /* Set media options. */ 5804 /* */ 5805 /* Returns: */ 5806 /* Nothing. 
*/ 5807 /****************************************************************************/ 5808 static int 5809 bce_ifmedia_upd_locked(struct ifnet *ifp) 5810 { 5811 struct bce_softc *sc = ifp->if_softc; 5812 struct mii_data *mii; 5813 int error; 5814 5815 DBENTER(BCE_VERBOSE_PHY); 5816 5817 error = 0; 5818 BCE_LOCK_ASSERT(sc); 5819 5820 mii = device_get_softc(sc->bce_miibus); 5821 5822 /* Make sure the MII bus has been enumerated. */ 5823 if (mii) { 5824 sc->bce_link_up = FALSE; 5825 if (mii->mii_instance) { 5826 struct mii_softc *miisc; 5827 5828 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 5829 mii_phy_reset(miisc); 5830 } 5831 error = mii_mediachg(mii); 5832 } 5833 5834 DBEXIT(BCE_VERBOSE_PHY); 5835 return (error); 5836 } 5837 5838 5839 /****************************************************************************/ 5840 /* Reports current media status. */ 5841 /* */ 5842 /* Returns: */ 5843 /* Nothing. */ 5844 /****************************************************************************/ 5845 static void 5846 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 5847 { 5848 struct bce_softc *sc = ifp->if_softc; 5849 struct mii_data *mii; 5850 5851 DBENTER(BCE_VERBOSE_PHY); 5852 5853 BCE_LOCK(sc); 5854 5855 if ((ifp->if_flags & IFF_UP) == 0) { 5856 BCE_UNLOCK(sc); 5857 return; 5858 } 5859 mii = device_get_softc(sc->bce_miibus); 5860 5861 mii_pollstat(mii); 5862 ifmr->ifm_active = mii->mii_media_active; 5863 ifmr->ifm_status = mii->mii_media_status; 5864 5865 BCE_UNLOCK(sc); 5866 5867 DBEXIT(BCE_VERBOSE_PHY); 5868 } 5869 5870 5871 /****************************************************************************/ 5872 /* Handles PHY generated interrupt events. */ 5873 /* */ 5874 /* Returns: */ 5875 /* Nothing. 
 */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);

	DBRUN(sc->phy_interrupts++);

	/*
	 * The attention bits hold the current link state; the "ack" bits
	 * hold the state the driver last acknowledged.  A difference
	 * between the two means the link changed since the last interrupt.
	 */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {

		/*
		 * Update the status_attn_bits_ack field to match the new
		 * state (set for link-up, clear for link-down).
		 */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
			    __FUNCTION__);
		}
		else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
			    __FUNCTION__);
		}

		/*
		 * Assume link is down and allow
		 * tick routine to update the state
		 * based on the actual media state.
		 */
		sc->bce_link_up = FALSE;
		callout_stop(&sc->bce_tick_callout);
		/* Run the tick handler immediately to re-evaluate. */
		bce_tick(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);

	DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over    */
/* chain page pointer if necessary).
 */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static inline u16
bce_get_hw_rx_cons(struct bce_softc *sc)
{
	u16 hw_cons;

	/* Make sure we read a fresh status block value. */
	rmb();
	hw_cons = sc->status_block->status_rx_quick_consumer_index0;
	/*
	 * If the index lands on a page's next-pointer BD, step past it —
	 * the next-pointer entry never holds a received frame.
	 */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	return hw_cons;
}

/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = sc->bce_ifp;
	struct l2_fhdr *l2fhdr;
	struct ether_vlan_header *vh;
	unsigned int pkt_len;
	u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
	u32 status;
#ifdef BCE_JUMBO_HDRSPLIT
	unsigned int rem_len;
	u16 sw_pg_cons, sw_pg_cons_idx;
#endif

	DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_rx++);
	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);

#ifdef BCE_JUMBO_HDRSPLIT
	/* Prepare the page chain pages to be accessed by the host CPU. */
	for (int i = 0; i < PG_PAGES; i++)
		bus_dmamap_sync(sc->pg_bd_chain_tag,
		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
#endif

	/* Get the hardware's view of the RX consumer index.
*/ 5984 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 5985 5986 /* Get working copies of the driver's view of the consumer indices. */ 5987 sw_rx_cons = sc->rx_cons; 5988 5989 #ifdef BCE_JUMBO_HDRSPLIT 5990 sw_pg_cons = sc->pg_cons; 5991 #endif 5992 5993 /* Update some debug statistics counters */ 5994 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 5995 sc->rx_low_watermark = sc->free_rx_bd); 5996 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), 5997 sc->rx_empty_count++); 5998 5999 /* Scan through the receive chain as long as there is work to do */ 6000 /* ToDo: Consider setting a limit on the number of packets processed. */ 6001 rmb(); 6002 while (sw_rx_cons != hw_rx_cons) { 6003 struct mbuf *m0; 6004 6005 /* Convert the producer/consumer indices to an actual rx_bd index. */ 6006 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); 6007 6008 /* Unmap the mbuf from DMA space. */ 6009 bus_dmamap_sync(sc->rx_mbuf_tag, 6010 sc->rx_mbuf_map[sw_rx_cons_idx], 6011 BUS_DMASYNC_POSTREAD); 6012 bus_dmamap_unload(sc->rx_mbuf_tag, 6013 sc->rx_mbuf_map[sw_rx_cons_idx]); 6014 6015 /* Remove the mbuf from the RX chain. */ 6016 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx]; 6017 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL; 6018 DBRUN(sc->debug_rx_mbuf_alloc--); 6019 sc->free_rx_bd++; 6020 6021 if(m0 == NULL) { 6022 DBPRINT(sc, BCE_EXTREME_RECV, 6023 "%s(): Oops! Empty mbuf pointer " 6024 "found in sc->rx_mbuf_ptr[0x%04X]!\n", 6025 __FUNCTION__, sw_rx_cons_idx); 6026 goto bce_rx_int_next_rx; 6027 } 6028 6029 /* 6030 * Frames received on the NetXteme II are prepended 6031 * with an l2_fhdr structure which provides status 6032 * information about the received frame (including 6033 * VLAN tags and checksum info). The frames are 6034 * also automatically adjusted to align the IP 6035 * header (i.e. two null bytes are inserted before 6036 * the Ethernet header). 
As a result the data 6037 * DMA'd by the controller into the mbuf looks 6038 * like this: 6039 * 6040 * +---------+-----+---------------------+-----+ 6041 * | l2_fhdr | pad | packet data | FCS | 6042 * +---------+-----+---------------------+-----+ 6043 * 6044 * The l2_fhdr needs to be checked and skipped and 6045 * the FCS needs to be stripped before sending the 6046 * packet up the stack. 6047 */ 6048 l2fhdr = mtod(m0, struct l2_fhdr *); 6049 6050 /* Get the packet data + FCS length and the status. */ 6051 pkt_len = l2fhdr->l2_fhdr_pkt_len; 6052 status = l2fhdr->l2_fhdr_status; 6053 6054 /* 6055 * Skip over the l2_fhdr and pad, resulting in the 6056 * following data in the mbuf: 6057 * +---------------------+-----+ 6058 * | packet data | FCS | 6059 * +---------------------+-----+ 6060 */ 6061 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN); 6062 6063 #ifdef BCE_JUMBO_HDRSPLIT 6064 /* 6065 * Check whether the received frame fits in a single 6066 * mbuf or not (i.e. packet data + FCS <= 6067 * sc->rx_bd_mbuf_data_len bytes). 6068 */ 6069 if (pkt_len > m0->m_len) { 6070 /* 6071 * The received frame is larger than a single mbuf. 6072 * If the frame was a TCP frame then only the TCP 6073 * header is placed in the mbuf, the remaining 6074 * payload (including FCS) is placed in the page 6075 * chain, the SPLIT flag is set, and the header 6076 * length is placed in the IP checksum field. 6077 * If the frame is not a TCP frame then the mbuf 6078 * is filled and the remaining bytes are placed 6079 * in the page chain. 6080 */ 6081 6082 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large " 6083 "packet.\n", __FUNCTION__); 6084 6085 /* 6086 * When the page chain is enabled and the TCP 6087 * header has been split from the TCP payload, 6088 * the ip_xsum structure will reflect the length 6089 * of the TCP header, not the IP checksum. Set 6090 * the packet length of the mbuf accordingly. 
6091 */ 6092 if (status & L2_FHDR_STATUS_SPLIT) 6093 m0->m_len = l2fhdr->l2_fhdr_ip_xsum; 6094 6095 rem_len = pkt_len - m0->m_len; 6096 6097 /* Pull mbufs off the page chain for the remaining data. */ 6098 while (rem_len > 0) { 6099 struct mbuf *m_pg; 6100 6101 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons); 6102 6103 /* Remove the mbuf from the page chain. */ 6104 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx]; 6105 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL; 6106 DBRUN(sc->debug_pg_mbuf_alloc--); 6107 sc->free_pg_bd++; 6108 6109 /* Unmap the page chain mbuf from DMA space. */ 6110 bus_dmamap_sync(sc->pg_mbuf_tag, 6111 sc->pg_mbuf_map[sw_pg_cons_idx], 6112 BUS_DMASYNC_POSTREAD); 6113 bus_dmamap_unload(sc->pg_mbuf_tag, 6114 sc->pg_mbuf_map[sw_pg_cons_idx]); 6115 6116 /* Adjust the mbuf length. */ 6117 if (rem_len < m_pg->m_len) { 6118 /* The mbuf chain is complete. */ 6119 m_pg->m_len = rem_len; 6120 rem_len = 0; 6121 } else { 6122 /* More packet data is waiting. */ 6123 rem_len -= m_pg->m_len; 6124 } 6125 6126 /* Concatenate the mbuf cluster to the mbuf. */ 6127 m_cat(m0, m_pg); 6128 6129 sw_pg_cons = NEXT_PG_BD(sw_pg_cons); 6130 } 6131 6132 /* Set the total packet length. */ 6133 m0->m_pkthdr.len = pkt_len; 6134 6135 } else { 6136 /* 6137 * The received packet is small and fits in a 6138 * single mbuf (i.e. the l2_fhdr + pad + packet + 6139 * FCS <= MHLEN). In other words, the packet is 6140 * 154 bytes or less in size. 6141 */ 6142 6143 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small " 6144 "packet.\n", __FUNCTION__); 6145 6146 /* Set the total packet length. */ 6147 m0->m_pkthdr.len = m0->m_len = pkt_len; 6148 } 6149 #else 6150 /* Set the total packet length. */ 6151 m0->m_pkthdr.len = m0->m_len = pkt_len; 6152 #endif 6153 6154 /* Remove the trailing Ethernet FCS. */ 6155 m_adj(m0, -ETHER_CRC_LEN); 6156 6157 /* Check that the resulting mbuf chain is valid. 
*/ 6158 DBRUN(m_sanity(m0, FALSE)); 6159 DBRUNIF(((m0->m_len < ETHER_HDR_LEN) | 6160 (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)), 6161 BCE_PRINTF("Invalid Ethernet frame size!\n"); 6162 m_print(m0, 128)); 6163 6164 DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control), 6165 sc->l2fhdr_error_sim_count++; 6166 status = status | L2_FHDR_ERRORS_PHY_DECODE); 6167 6168 /* Check the received frame for errors. */ 6169 if (status & (L2_FHDR_ERRORS_BAD_CRC | 6170 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 6171 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { 6172 6173 /* Log the error and release the mbuf. */ 6174 ifp->if_ierrors++; 6175 sc->l2fhdr_error_count++; 6176 6177 m_freem(m0); 6178 m0 = NULL; 6179 goto bce_rx_int_next_rx; 6180 } 6181 6182 /* Send the packet to the appropriate interface. */ 6183 m0->m_pkthdr.rcvif = ifp; 6184 6185 /* Assume no hardware checksum. */ 6186 m0->m_pkthdr.csum_flags = 0; 6187 6188 /* Validate the checksum if offload enabled. */ 6189 if (ifp->if_capenable & IFCAP_RXCSUM) { 6190 6191 /* Check for an IP datagram. */ 6192 if (!(status & L2_FHDR_STATUS_SPLIT) && 6193 (status & L2_FHDR_STATUS_IP_DATAGRAM)) { 6194 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 6195 DBRUN(sc->csum_offload_ip++); 6196 /* Check if the IP checksum is valid. */ 6197 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) 6198 m0->m_pkthdr.csum_flags |= 6199 CSUM_IP_VALID; 6200 } 6201 6202 /* Check for a valid TCP/UDP frame. */ 6203 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 6204 L2_FHDR_STATUS_UDP_DATAGRAM)) { 6205 6206 /* Check for a good TCP/UDP checksum. */ 6207 if ((status & (L2_FHDR_ERRORS_TCP_XSUM | 6208 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 6209 DBRUN(sc->csum_offload_tcp_udp++); 6210 m0->m_pkthdr.csum_data = 6211 l2fhdr->l2_fhdr_tcp_udp_xsum; 6212 m0->m_pkthdr.csum_flags |= 6213 (CSUM_DATA_VALID 6214 | CSUM_PSEUDO_HDR); 6215 } 6216 } 6217 } 6218 6219 /* Attach the VLAN tag. 
*/ 6220 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 6221 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 6222 #if __FreeBSD_version < 700000 6223 VLAN_INPUT_TAG(ifp, m0, 6224 l2fhdr->l2_fhdr_vlan_tag, continue); 6225 #else 6226 m0->m_pkthdr.ether_vtag = 6227 l2fhdr->l2_fhdr_vlan_tag; 6228 m0->m_flags |= M_VLANTAG; 6229 #endif 6230 } else { 6231 /* 6232 * bce(4) controllers can't disable VLAN 6233 * tag stripping if management firmware 6234 * (ASF/IPMI/UMP) is running. So we always 6235 * strip VLAN tag and manually reconstruct 6236 * the VLAN frame by appending stripped 6237 * VLAN tag in driver if VLAN tag stripping 6238 * was disabled. 6239 * 6240 * TODO: LLC SNAP handling. 6241 */ 6242 bcopy(mtod(m0, uint8_t *), 6243 mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN, 6244 ETHER_ADDR_LEN * 2); 6245 m0->m_data -= ETHER_VLAN_ENCAP_LEN; 6246 vh = mtod(m0, struct ether_vlan_header *); 6247 vh->evl_encap_proto = htons(ETHERTYPE_VLAN); 6248 vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag); 6249 m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN; 6250 m0->m_len += ETHER_VLAN_ENCAP_LEN; 6251 } 6252 } 6253 6254 /* Increment received packet statistics. */ 6255 ifp->if_ipackets++; 6256 6257 bce_rx_int_next_rx: 6258 sw_rx_cons = NEXT_RX_BD(sw_rx_cons); 6259 6260 /* If we have a packet, pass it up the stack */ 6261 if (m0) { 6262 /* Make sure we don't lose our place when we release the lock. */ 6263 sc->rx_cons = sw_rx_cons; 6264 #ifdef BCE_JUMBO_HDRSPLIT 6265 sc->pg_cons = sw_pg_cons; 6266 #endif 6267 6268 BCE_UNLOCK(sc); 6269 (*ifp->if_input)(ifp, m0); 6270 BCE_LOCK(sc); 6271 6272 /* Recover our place. */ 6273 sw_rx_cons = sc->rx_cons; 6274 #ifdef BCE_JUMBO_HDRSPLIT 6275 sw_pg_cons = sc->pg_cons; 6276 #endif 6277 } 6278 6279 /* Refresh hw_cons to see if there's new work */ 6280 if (sw_rx_cons == hw_rx_cons) 6281 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 6282 } 6283 6284 #ifdef BCE_JUMBO_HDRSPLIT 6285 /* No new packets. Refill the page chain. 
 */
	sc->pg_cons = sw_pg_cons;
	bce_fill_pg_chain(sc);
#endif

	/* No new packets. Refill the RX chain. */
	sc->rx_cons = sw_rx_cons;
	bce_fill_rx_chain(sc);

	/* Prepare the page chain pages to be accessed by the NIC. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

#ifdef BCE_JUMBO_HDRSPLIT
	for (int i = 0; i < PG_PAGES; i++)
		bus_dmamap_sync(sc->pg_bd_chain_tag,
		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
#endif

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
	DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static inline u16
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	u16 hw_cons;

	/* Barrier before reading the controller-updated status block. */
	mb();
	hw_cons = sc->status_block->status_tx_quick_consumer_index0;
	/*
	 * The last entry of each tx_bd page is a pointer to the next
	 * page rather than a usable descriptor, so skip over it.
	 */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;

	return hw_cons;
}


/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_tx++);
	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);

	BCE_LOCK_ASSERT(sc);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
		    "sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    BCE_PRINTF("%s(%d): TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
		    (int) MAX_TX_BD);
		    bce_breakpoint(sc));

		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
		    [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
		    __FILE__, __LINE__, sw_tx_chain_cons);
		    bce_breakpoint(sc));

		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
		    bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			/*
			 * NOTE(review): the message text below looks
			 * inverted (the mbuf is non-NULL here, it is the
			 * END flag that is missing) — confirm intent.
			 */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
			    BCE_PRINTF("%s(%d): tx_bd END flag not set but "
			    "txmbuf == NULL!\n", __FILE__, __LINE__);
			    bce_breakpoint(sc));

			DBRUNMSG(BCE_INFO_SEND,
			    BCE_PRINTF("%s(): Unloading map/freeing mbuf "
			    "from tx_bd[0x%04X]\n", __FUNCTION__,
			    sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);

		/* Prevent speculative reads of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	sc->watchdog_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
		    DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): Open TX chain! %d/%d (used/total)\n",
		    __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	DBENTER(BCE_VERBOSE_INTR);

	/* Mask interrupts; the read flushes the posted write. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	DBEXIT(BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc, int coal_now)
{
	DBENTER(BCE_VERBOSE_INTR);

	/* Ack up to last_status_idx with interrupts still masked... */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* ...then unmask. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Force an immediate interrupt (whether there is new data or not). */
	if (coal_now)
		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	DBEXIT(BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu = 0;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF("%s(%d): Controller reset failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF("%s(%d): Block initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/*
	 * Calculate and program the hardware Ethernet MTU
	 * size. Be generous on the receive if we have room.
	 *
	 * Note the 'else' below pairs with whichever 'if' the
	 * preprocessor keeps: with header splitting the page chain
	 * capacity is included in the comparison.
	 */
#ifdef BCE_JUMBO_HDRSPLIT
	if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
	    sc->pg_bd_mbuf_alloc_size))
		ether_mtu = sc->rx_bd_mbuf_data_len +
		    sc->pg_bd_mbuf_alloc_size;
#else
	if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
		ether_mtu = sc->rx_bd_mbuf_data_len;
#endif
	else
		ether_mtu = ifp->if_mtu;

	/* Account for the Ethernet header, a VLAN tag and the FCS. */
	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n",
	    __FUNCTION__, ether_mtu);

	/* Program the mtu, enabling jumbo frame support if necessary.
 */
	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
	else
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);

	DBPRINT(sc, BCE_INFO_LOAD,
	    "%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, "
	    "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
	    sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
	    sc->rx_bd_mbuf_align_pad);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

#ifdef BCE_JUMBO_HDRSPLIT
	DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
	    __FUNCTION__, sc->pg_bd_mbuf_alloc_size);

	/* Init page buffer descriptor chain. */
	bce_init_pg_chain(sc);
#endif

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

	/* Enable host interrupts. */
	bce_enable_intr(sc, 1);

	bce_ifmedia_upd_locked(ifp);

	/* Let the OS know the driver is up and running. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic (1 Hz) driver tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

bce_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	/* Bail out if management firmware is not running.
*/ 6627 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) { 6628 DBPRINT(sc, BCE_VERBOSE_SPECIAL, 6629 "No management firmware running...\n"); 6630 goto bce_mgmt_init_locked_exit; 6631 } 6632 6633 ifp = sc->bce_ifp; 6634 6635 /* Enable all critical blocks in the MAC. */ 6636 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 6637 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 6638 DELAY(20); 6639 6640 bce_ifmedia_upd_locked(ifp); 6641 6642 bce_mgmt_init_locked_exit: 6643 DBEXIT(BCE_VERBOSE_RESET); 6644 } 6645 6646 6647 /****************************************************************************/ 6648 /* Handles controller initialization when called from an unlocked routine. */ 6649 /* */ 6650 /* Returns: */ 6651 /* Nothing. */ 6652 /****************************************************************************/ 6653 static void 6654 bce_init(void *xsc) 6655 { 6656 struct bce_softc *sc = xsc; 6657 6658 DBENTER(BCE_VERBOSE_RESET); 6659 6660 BCE_LOCK(sc); 6661 bce_init_locked(sc); 6662 BCE_UNLOCK(sc); 6663 6664 DBEXIT(BCE_VERBOSE_RESET); 6665 } 6666 6667 6668 /****************************************************************************/ 6669 /* Modifies an mbuf for TSO on the hardware. */ 6670 /* */ 6671 /* Returns: */ 6672 /* Pointer to a modified mbuf. */ 6673 /****************************************************************************/ 6674 static struct mbuf * 6675 bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags) 6676 { 6677 struct mbuf *m; 6678 struct ether_header *eh; 6679 struct ip *ip; 6680 struct tcphdr *th; 6681 u16 etype; 6682 int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0; 6683 6684 DBRUN(sc->tso_frames_requested++); 6685 6686 /* Controller may modify mbuf chains. 
*/ 6687 if (M_WRITABLE(*m_head) == 0) { 6688 m = m_dup(*m_head, M_DONTWAIT); 6689 m_freem(*m_head); 6690 if (m == NULL) { 6691 sc->mbuf_alloc_failed_count++; 6692 *m_head = NULL; 6693 return (NULL); 6694 } 6695 *m_head = m; 6696 } 6697 6698 /* 6699 * For TSO the controller needs two pieces of info, 6700 * the MSS and the IP+TCP options length. 6701 */ 6702 m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip)); 6703 if (m == NULL) { 6704 *m_head = NULL; 6705 return (NULL); 6706 } 6707 eh = mtod(m, struct ether_header *); 6708 etype = ntohs(eh->ether_type); 6709 6710 /* Check for supported TSO Ethernet types (only IPv4 for now) */ 6711 switch (etype) { 6712 case ETHERTYPE_IP: 6713 ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); 6714 /* TSO only supported for TCP protocol. */ 6715 if (ip->ip_p != IPPROTO_TCP) { 6716 BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n", 6717 __FILE__, __LINE__); 6718 m_freem(*m_head); 6719 *m_head = NULL; 6720 return (NULL); 6721 } 6722 6723 /* Get IP header length in bytes (min 20) */ 6724 ip_hlen = ip->ip_hl << 2; 6725 m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen + 6726 sizeof(struct tcphdr)); 6727 if (m == NULL) { 6728 *m_head = NULL; 6729 return (NULL); 6730 } 6731 6732 /* Get the TCP header length in bytes (min 20) */ 6733 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 6734 tcp_hlen = (th->th_off << 2); 6735 6736 /* Make sure all IP/TCP options live in the same buffer. 
*/ 6737 m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen + 6738 tcp_hlen); 6739 if (m == NULL) { 6740 *m_head = NULL; 6741 return (NULL); 6742 } 6743 6744 /* IP header length and checksum will be calc'd by hardware */ 6745 ip_len = ip->ip_len; 6746 ip->ip_len = 0; 6747 ip->ip_sum = 0; 6748 break; 6749 case ETHERTYPE_IPV6: 6750 BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n", 6751 __FILE__, __LINE__); 6752 m_freem(*m_head); 6753 *m_head = NULL; 6754 return (NULL); 6755 /* NOT REACHED */ 6756 default: 6757 BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n", 6758 __FILE__, __LINE__); 6759 m_freem(*m_head); 6760 *m_head = NULL; 6761 return (NULL); 6762 } 6763 6764 hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen; 6765 6766 DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, " 6767 "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n", 6768 __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen, 6769 tcp_hlen, ip_len); 6770 6771 /* Set the LSO flag in the TX BD */ 6772 *flags |= TX_BD_FLAGS_SW_LSO; 6773 6774 /* Set the length of IP + TCP options (in 32 bit words) */ 6775 *flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) - 6776 sizeof(struct tcphdr)) >> 2) << 8); 6777 6778 DBRUN(sc->tso_frames_completed++); 6779 return (*m_head); 6780 } 6781 6782 6783 /****************************************************************************/ 6784 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */ 6785 /* memory visible to the controller. */ 6786 /* */ 6787 /* Returns: */ 6788 /* 0 for success, positive value for failure. */ 6789 /* Modified: */ 6790 /* m_head: May be set to NULL if MBUF is excessively fragmented. 
 */
/****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0;
	u32 prod_bseq;

#ifdef BCE_DEBUG
	u16 debug_prod;
#endif

	int i, error, nsegs, rc = 0;

	DBENTER(BCE_VERBOSE_SEND);

	/* Make sure we have room in the TX chain. */
	if (sc->used_tx_bd >= sc->max_tx_bd)
		goto bce_tx_encap_exit;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
			/* bce_tso_setup() may replace the chain head. */
			m0 = bce_tso_setup(sc, m_head, &flags);
			if (m0 == NULL) {
				DBRUN(sc->tso_frames_failed++);
				goto bce_tx_encap_exit;
			}
			mss = htole16(m0->m_pkthdr.tso_segsz);
		} else {
			if (m0->m_pkthdr.csum_flags & CSUM_IP)
				flags |= TX_BD_FLAGS_IP_CKSUM;
			if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
				flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
		}
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/* Check if the DMA mapping was successful */
	if (error == EFBIG) {
		/* Chain has more segments than the controller allows. */
		sc->mbuf_frag_count++;

		/* Try to defrag the mbuf.
 */
		m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS);
		if (m0 == NULL) {
			/* Defrag was unsuccessful */
			m_freem(*m_head);
			*m_head = NULL;
			sc->mbuf_alloc_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}

		/* Defrag was successful, try mapping again */
		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag,
		    map, m0, segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* Insufficient DMA buffers available. */
			sc->dma_map_addr_tx_failed_count++;
			rc = error;
			goto bce_tx_encap_exit;
		} else if (error != 0) {
			/* Release it and return an error. */
			BCE_PRINTF("%s(%d): Unknown error mapping mbuf into "
			    "TX chain!\n", __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			sc->dma_map_addr_tx_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}
	} else if (error == ENOMEM) {
		/* Insufficient DMA buffers available. */
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	}

	/* Make sure there's room in the chain */
	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		rc = ENOBUFS;
		goto bce_tx_encap_exit;
	}

	/* prod points to an empty tx_bd at this point.
 */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n",
	    __FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)]
		    [TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mss was already htole16()'d above, and
		 * here widths are mixed (htole32 of the shifted value
		 * OR'd with an htole16) — this works on little-endian
		 * hosts but looks wrong for big-endian; verify.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUNMSG(BCE_EXTREME_SEND,
	    bce_dump_tx_chain(sc, debug_prod, nsegs));

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain. This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	/*
	 * NOTE(review): DBRUNIF is used elsewhere with a (condition,
	 * action) pair; the single-argument use below looks like it was
	 * meant to be DBRUN — confirm against the macro definition.
	 */
	DBRUNIF(sc->debug_tx_mbuf_alloc++);

	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting TX frames. */
	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);

bce_tx_encap_exit:
	DBEXIT(BCE_VERBOSE_SEND);
	return(rc);
}


/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);

	BCE_LOCK_ASSERT(sc);

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
	    "tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* If there's no link or the transmit queue is empty then just exit.
 */
	if (sc->bce_link_up == FALSE) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		/* Stop when the transmit queue is empty. */
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			/* m_head may be NULL if encap freed the mbuf. */
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
			    "TX chain is closed for business! Total "
			    "tx_bd used = %d\n", sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/* Exit if no packets were dequeued. */
	if (count == 0) {
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were "
		    "dequeued\n", __FUNCTION__);
		goto bce_start_locked_exit;
	}

	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into "
	    "send queue.\n", __FUNCTION__, count);

	/* Set the tx timeout.
 */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));

bce_start_locked_exit:
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
	return;
}


/****************************************************************************/
/* Main transmit routine when called from another routine without a lock.   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	DBENTER(BCE_VERBOSE_SEND);

	BCE_LOCK(sc);
	bce_start_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_SEND);
}


/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int mask, error = 0, reinit;

	DBENTER(BCE_VERBOSE_MISC);

	switch(command) {

	/* Set the interface MTU. */
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported.
 */
		if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
		    (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO_MISC,
		    "SIOCSIFMTU: Changing MTU from %d to %d\n",
		    (int) ifp->if_mtu, (int) ifr->ifr_mtu);

		BCE_LOCK(sc);
		ifp->if_mtu = ifr->ifr_mtu;
		reinit = 0;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/*
			 * Because allocation size is used in RX
			 * buffer allocation, stop controller if
			 * it is already running.
			 */
			bce_stop(sc);
			reinit = 1;
		}
#ifdef BCE_JUMBO_HDRSPLIT
		/* No buffer allocation size changes are necessary. */
#else
		/* Recalculate our buffer allocation sizes. */
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
		    ETHER_CRC_LEN) > MCLBYTES) {
			sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
			sc->rx_bd_mbuf_align_pad =
			    roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
			sc->rx_bd_mbuf_data_len =
			    sc->rx_bd_mbuf_alloc_size -
			    sc->rx_bd_mbuf_align_pad;
		} else {
			sc->rx_bd_mbuf_alloc_size = MCLBYTES;
			sc->rx_bd_mbuf_align_pad =
			    roundup2(MCLBYTES, 16) - MCLBYTES;
			sc->rx_bd_mbuf_data_len =
			    sc->rx_bd_mbuf_alloc_size -
			    sc->rx_bd_mbuf_align_pad;
		}
#endif

		if (reinit != 0)
			bce_init_locked(sc);
		BCE_UNLOCK(sc);
		break;

	/* Set interface flags. */
	case SIOCSIFFLAGS:
		DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");

		BCE_LOCK(sc);

		/* Check if the interface is up. */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Change promiscuous/multicast flags as necessary. */
				bce_set_rx_mode(sc);
			} else {
				/* Start the HW */
				bce_init_locked(sc);
			}
		} else {
			/* The interface is down, check if driver is running. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bce_stop(sc);

				/* If MFW is running, restart the controller a bit.
 */
				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
					bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
					bce_chipinit(sc);
					bce_mgmt_init_locked(sc);
				}
			}
		}

		BCE_UNLOCK(sc);
		break;

	/* Add/Delete multicast address */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCADDMULTI/SIOCDELMULTI\n");

		BCE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bce_set_rx_mode(sc);
		BCE_UNLOCK(sc);

		break;

	/* Set/Get Interface media */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");

		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr,
		    &mii->mii_media, command);
		break;

	/* Set interface capability */
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		DBPRINT(sc, BCE_INFO_MISC,
		    "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);

		/* Toggle the TX checksum capabilities enable flag. */
		if (mask & IFCAP_TXCSUM &&
		    ifp->if_capabilities & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= BCE_IF_HWASSIST;
			else
				ifp->if_hwassist &= ~BCE_IF_HWASSIST;
		}

		/* Toggle the RX checksum capabilities enable flag. */
		if (mask & IFCAP_RXCSUM &&
		    ifp->if_capabilities & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		/* Toggle the TSO capabilities enable flag.
*/
		if (bce_tso_enable && (mask & IFCAP_TSO4) &&
		    ifp->if_capabilities & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (IFCAP_TSO4 & ifp->if_capenable)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_VLAN_HWCSUM &&
		    ifp->if_capabilities & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/*
		 * Don't actually disable VLAN tag stripping as
		 * management firmware (ASF/IPMI/UMP) requires the
		 * feature.  If VLAN tag stripping is disabled the
		 * driver will manually reconstruct the VLAN frame
		 * by appending the stripped VLAN tag.
		 */
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			/* HWTSO requires HWTAGGING; drop it together. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			    == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		/* We don't know how to handle the IOCTL, pass it on. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DBEXIT(BCE_VERBOSE_MISC);
	return(error);
}


/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Counts down watchdog_timer once per call (driven from bce_tick()); when  */
/* it hits zero the controller is assumed hung and is reinitialized.        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct bce_softc *sc)
{
	DBENTER(BCE_EXTREME_SEND);

	BCE_LOCK_ASSERT(sc);

	/* If the watchdog timer hasn't expired then just exit. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		goto bce_watchdog_exit;

	/*
	 * If pause frames are active then don't reset the hardware:
	 * the link partner has XOFF'ed us, so stalled TX is expected.
	 */
	/* ToDo: Should we reset the timer here? */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		goto bce_watchdog_exit;

	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
	    __FILE__, __LINE__);

	/* Dump extensive chip state before the reset (debug builds only). */
	DBRUNMSG(BCE_INFO,
	    bce_dump_driver_state(sc);
	    bce_dump_status_block(sc);
	    bce_dump_stats_block(sc);
	    bce_dump_ftqs(sc);
	    bce_dump_txp_state(sc, 0);
	    bce_dump_rxp_state(sc, 0);
	    bce_dump_tpat_state(sc, 0);
	    bce_dump_cp_state(sc, 0);
	    bce_dump_com_state(sc, 0));

	DBRUN(bce_breakpoint(sc));

	/* Mark the interface down so bce_init_locked() does a full restart. */
	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	bce_init_locked(sc);
	sc->bce_ifp->if_oerrors++;

bce_watchdog_exit:
	DBEXIT(BCE_EXTREME_SEND);
}


/*
 * Interrupt handler.
 */
/****************************************************************************/
/* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;
	u16 hw_rx_cons, hw_tx_cons;

	sc = xsc;
	ifp = sc->bce_ifp;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc));

	BCE_LOCK(sc);

	DBRUN(sc->interrupts_generated++);

	/* Synchronize before we read from the interface's status block. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 * (INTA still deasserted => shared-IRQ
	 * spurious call.)
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	    BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
		DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
		    __FUNCTION__);
		goto bce_intr_exit;
	}

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		/* Debug build: randomly inject a fatal attention bit. */
		DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
		    BCE_PRINTF("Simulating unexpected status attention "
		    "bit set.");
		    sc->unexpected_attention_sim_count++;
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/*
		 * Was it a link change interrupt?  (Asserted bit differs
		 * from the acknowledged copy until serviced.)
		 */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE)) {
			bce_phy_intr(sc);

			/* Clear transient updates during link state change. */
			REG_WR(sc, BCE_HC_COMMAND, sc->hc_command |
			    BCE_HC_COMMAND_COAL_NOW_WO_INT);
			REG_RD(sc, BCE_HC_COMMAND);
		}

		/* If any other attention is asserted, the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {

			sc->unexpected_attention_count++;

			BCE_PRINTF("%s(%d): Fatal attention detected: "
			    "0x%08X\n", __FILE__, __LINE__,
			    sc->status_block->status_attn_bits);

			DBRUNMSG(BCE_FATAL,
			    if (unexpected_attention_sim_control == 0)
				bce_breakpoint(sc));

			/* Reinitialize the controller and bail out. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (hw_rx_cons != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (hw_tx_cons != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save status block index value for the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/*
		 * If there's no work left then exit the
		 * interrupt service routine.  Re-sample the
		 * consumer indices to catch work that arrived
		 * while we were processing.
		 */
		hw_rx_cons = bce_get_hw_rx_cons(sc);
		hw_tx_cons = bce_get_hw_tx_cons(sc);

		if ((hw_rx_cons == sc->hw_rx_cons) &&
		    (hw_tx_cons == sc->hw_tx_cons))
			break;

	}

	bus_dmamap_sync(sc->status_tag, sc->status_map,
	    BUS_DMASYNC_PREREAD);

	/* Re-enable interrupts. */
	bce_enable_intr(sc, 0);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}


/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int h, i;

	DBENTER(BCE_VERBOSE_MISC);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
	    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Low byte of the CRC selects the filter bit:
			 * top 3 bits pick the hash register, low 5 bits
			 * pick the bit within it.
			 */
			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}
		if_maddr_runlock(ifp);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: "
		    "0x%08X\n", rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);

	DBEXIT(BCE_VERBOSE_MISC);
}


/****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBENTER(BCE_EXTREME_MISC);

	ifp = sc->bce_ifp;

	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors +=
		    (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  The 64-bit octet and
	 * packet counters are split into hi/lo 32-bit
	 * halves in the statistics block.
	 */
	sc->stat_IfHCInOctets =
	    ((u64) stats->stat_IfHCInOctets_hi << 32) +
	    (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u64) stats->stat_IfHCOutOctets_hi << 32) +
	    (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* ToDo: Preserve counters beyond 32 bits? */
	/* ToDo: Read the statistics from auto-clear regs? */

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
	    stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
	    stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
	    stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
	    stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
	    stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
	    stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOversizePkts =
	    stats->stat_EtherStatsOversizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
	    stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
	    stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
	    stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
	    stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
	    stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
	    stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
	    stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
	    stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
	    stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
	    stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
	    stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
	    stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	/*
	 * Firmware-maintained "no mbuf buffers" counter, read
	 * indirectly from chip memory (same address is zeroed by
	 * bce_sysctl_stats_clear()).
	 */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions =
	    (u_long) sc->stat_EtherStatsCollisions;

	/* ToDo: This method loses soft errors. */
	ifp->if_ierrors =
	    (u_long) sc->stat_EtherStatsUndersizePkts +
	    (u_long) sc->stat_EtherStatsOversizePkts +
	    (u_long) sc->stat_IfInMBUFDiscards +
	    (u_long) sc->stat_Dot3StatsAlignmentErrors +
	    (u_long) sc->stat_Dot3StatsFCSErrors +
	    (u_long) sc->stat_IfInRuleCheckerDiscards +
	    (u_long) sc->stat_IfInFTQDiscards +
	    (u_long) sc->com_no_buffers;

	/* ToDo: This method loses soft errors. */
	ifp->if_oerrors =
	    (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long) sc->stat_Dot3StatsExcessiveCollisions +
	    (u_long) sc->stat_Dot3StatsLateCollisions;

	/* ToDo: Add additional statistics? */

	DBEXIT(BCE_EXTREME_MISC);
}


/****************************************************************************/
/* Periodic function to notify the bootcode that the driver is still        */
/* present.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_pulse(void *xsc)
{
	struct bce_softc *sc = xsc;
	u32 msg;

	DBENTER(BCE_EXTREME_MISC);

	BCE_LOCK_ASSERT(sc);

	/* Tell the firmware that the driver is still running.
*/
	/* Incrementing sequence number proves the driver is alive. */
	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);

	/* Update the bootcode condition. */
	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);

	/* Report whether the bootcode still knows the driver is running. */
	if (bootverbose) {
		if (sc->bce_drv_cardiac_arrest == FALSE) {
			if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
				sc->bce_drv_cardiac_arrest = TRUE;
				BCE_PRINTF("%s(): Warning: bootcode "
				    "thinks driver is absent! "
				    "(bc_state = 0x%08X)\n",
				    __FUNCTION__, sc->bc_state);
			}
		} else {
			/*
			 * Not supported by all bootcode versions.
			 * (v5.0.11+ and v5.2.1+)  Older bootcode
			 * will require the driver to reset the
			 * controller to clear this condition.
			 */
			if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
				sc->bce_drv_cardiac_arrest = FALSE;
				BCE_PRINTF("%s(): Bootcode found the "
				    "driver pulse! (bc_state = 0x%08X)\n",
				    __FUNCTION__, sc->bc_state);
			}
		}
	}


	/* Schedule the next pulse. */
	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);

	DBEXIT(BCE_EXTREME_MISC);
}


/****************************************************************************/
/* Periodic function to perform maintenance tasks.                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;

	ifp = sc->bce_ifp;

	DBENTER(BCE_EXTREME_MISC);

	BCE_LOCK_ASSERT(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

	/* Update the statistics from the hardware statistics block. */
	bce_stats_update(sc);

	/* Top off the receive and page chains. */
#ifdef BCE_JUMBO_HDRSPLIT
	bce_fill_pg_chain(sc);
#endif
	bce_fill_rx_chain(sc);

	/* Check that the chip hasn't hung. */
	bce_watchdog(sc);

	/* If link is already up then we're done. */
	if (sc->bce_link_up == TRUE)
		goto bce_tick_exit;

	/* Link is down.  Check what the PHY's doing. */
	mii = device_get_softc(sc->bce_miibus);
	mii_tick(mii);

	/* Check if the link has come up. */
	if ((mii->mii_media_status & IFM_ACTIVE) &&
	    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "%s(): Link up!\n", __FUNCTION__);
		sc->bce_link_up = TRUE;
		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
		    bootverbose)
			BCE_PRINTF("Gigabit link up!\n");

		/* Now that link is up, handle any outstanding TX traffic. */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found "
			    "pending TX traffic.\n", __FUNCTION__);
			bce_start_locked(ifp);
		}
	}

bce_tick_exit:
	DBEXIT(BCE_EXTREME_MISC);
	return;
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Allows the driver state to be dumped through the sysctl interface.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	/* -1 sentinel: only a user write of 1 triggers the dump. */
	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_driver_state(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the hardware state to be dumped through the sysctl interface.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_hw_state(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the status block to be dumped through the sysctl interface.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_status_block(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_status_block(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the stats block to be dumped through the sysctl interface.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_stats_block(sc);
	}

	return error;
}


/****************************************************************************/
/* Allows the stat counters to be cleared without unloading/reloading the   */
/* driver.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;

		/* Clear the internal H/W statistics counters. */
		REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);

		/* Reset the driver maintained statistics. */
		sc->interrupts_rx =
		    sc->interrupts_tx = 0;
		sc->tso_frames_requested =
		    sc->tso_frames_completed =
		    sc->tso_frames_failed = 0;
		sc->rx_empty_count =
		    sc->tx_full_count = 0;
		/* Watermarks restart from their extreme values. */
		sc->rx_low_watermark = USABLE_RX_BD;
		sc->tx_hi_watermark = 0;
		sc->l2fhdr_error_count =
		    sc->l2fhdr_error_sim_count = 0;
		sc->mbuf_alloc_failed_count =
		    sc->mbuf_alloc_failed_sim_count = 0;
		sc->dma_map_addr_rx_failed_count =
		    sc->dma_map_addr_tx_failed_count = 0;
		sc->mbuf_frag_count = 0;
		sc->csum_offload_tcp_udp =
		    sc->csum_offload_ip = 0;
		sc->vlan_tagged_frames_rcvd =
		    sc->vlan_tagged_frames_stripped = 0;

		/* Clear firmware maintained statistics. */
		REG_WR_IND(sc, 0x120084, 0);
	}

	return error;
}


/****************************************************************************/
/* Allows the bootcode state to be dumped through the sysctl interface.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_bc_state(sc);
	}

	return error;
}


/****************************************************************************/
/* Provides a sysctl interface to allow dumping the RX BD chain.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	/* -1 sentinel: only a user write of 1 triggers the dump. */
	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD);
	}

	return error;
}


/****************************************************************************/
/* Provides a sysctl interface to allow dumping the RX MBUF chain.          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
	}

	return error;
}


/****************************************************************************/
/* Provides a sysctl interface to allow dumping the TX chain.               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
	}

	return error;
}


#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the page chain.             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
	}

	return error;
}
#endif

/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in  */
/* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error;
	u32 result;
	u32 val[1];
	u8 *data = (u8 *) val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	/*
	 * The value written by the user is the NVRAM offset; read
	 * 4 bytes there and log them (big-endian on the wire).
	 * NOTE(review): bce_nvram_read()'s return value is ignored.
	 */
	bce_nvram_read(sc, result, data, 4);
	BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));

	return (error);
}


/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary registers in the  */
/* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error;
	u32 val, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	/*
	 * Make sure the register is accessible: low offsets are
	 * directly mapped, higher ones go through the indirect
	 * register access window.
	 */
	if (result < 0x8000) {
		val = REG_RD(sc, result);
		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
	} else if (result < 0x0280000) {
		val = REG_RD_IND(sc, result);
		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
	}

	return (error);
}


/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
/* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc;
	device_t dev;
	int error, result;
	u16 val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	/* Make sure the register is accessible (MII regs are 0x00-0x1F). */
	if (result < 0x20) {
		sc = (struct bce_softc *)arg1;
		dev = sc->bce_dev;
		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
	}
	return (error);
}


/****************************************************************************/
/* Provides a sysctl interface to allow reading a CID.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	/* Make sure the requested CID is in range. */
	if (result <= TX_CID) {
		sc = (struct bce_softc *)arg1;
		bce_dump_ctx(sc, result);
	}

	return (error);
}


/****************************************************************************/
/* Provides a sysctl interface to forcing the driver to dump state and      */
/* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 8378 /****************************************************************************/ 8379 static int 8380 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 8381 { 8382 int error; 8383 int result; 8384 struct bce_softc *sc; 8385 8386 result = -1; 8387 error = sysctl_handle_int(oidp, &result, 0, req); 8388 8389 if (error || !req->newptr) 8390 return (error); 8391 8392 if (result == 1) { 8393 sc = (struct bce_softc *)arg1; 8394 bce_breakpoint(sc); 8395 } 8396 8397 return error; 8398 } 8399 #endif 8400 8401 8402 /****************************************************************************/ 8403 /* Adds any sysctl parameters for tuning or debugging purposes. */ 8404 /* */ 8405 /* Returns: */ 8406 /* 0 for success, positive value for failure. */ 8407 /****************************************************************************/ 8408 static void 8409 bce_add_sysctls(struct bce_softc *sc) 8410 { 8411 struct sysctl_ctx_list *ctx; 8412 struct sysctl_oid_list *children; 8413 8414 DBENTER(BCE_VERBOSE_MISC); 8415 8416 ctx = device_get_sysctl_ctx(sc->bce_dev); 8417 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); 8418 8419 #ifdef BCE_DEBUG 8420 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8421 "l2fhdr_error_sim_control", 8422 CTLFLAG_RW, &l2fhdr_error_sim_control, 8423 0, "Debug control to force l2fhdr errors"); 8424 8425 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8426 "l2fhdr_error_sim_count", 8427 CTLFLAG_RD, &sc->l2fhdr_error_sim_count, 8428 0, "Number of simulated l2_fhdr errors"); 8429 #endif 8430 8431 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8432 "l2fhdr_error_count", 8433 CTLFLAG_RD, &sc->l2fhdr_error_count, 8434 0, "Number of l2_fhdr errors"); 8435 8436 #ifdef BCE_DEBUG 8437 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8438 "mbuf_alloc_failed_sim_control", 8439 CTLFLAG_RW, &mbuf_alloc_failed_sim_control, 8440 0, "Debug control to force mbuf allocation failures"); 8441 8442 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8443 "mbuf_alloc_failed_sim_count", 8444 CTLFLAG_RD, 
&sc->mbuf_alloc_failed_sim_count, 8445 0, "Number of simulated mbuf cluster allocation failures"); 8446 #endif 8447 8448 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8449 "mbuf_alloc_failed_count", 8450 CTLFLAG_RD, &sc->mbuf_alloc_failed_count, 8451 0, "Number of mbuf allocation failures"); 8452 8453 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8454 "mbuf_frag_count", 8455 CTLFLAG_RD, &sc->mbuf_frag_count, 8456 0, "Number of fragmented mbufs"); 8457 8458 #ifdef BCE_DEBUG 8459 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8460 "dma_map_addr_failed_sim_control", 8461 CTLFLAG_RW, &dma_map_addr_failed_sim_control, 8462 0, "Debug control to force DMA mapping failures"); 8463 8464 /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */ 8465 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8466 "dma_map_addr_failed_sim_count", 8467 CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count, 8468 0, "Number of simulated DMA mapping failures"); 8469 8470 #endif 8471 8472 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8473 "dma_map_addr_rx_failed_count", 8474 CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count, 8475 0, "Number of RX DMA mapping failures"); 8476 8477 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8478 "dma_map_addr_tx_failed_count", 8479 CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count, 8480 0, "Number of TX DMA mapping failures"); 8481 8482 #ifdef BCE_DEBUG 8483 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8484 "unexpected_attention_sim_control", 8485 CTLFLAG_RW, &unexpected_attention_sim_control, 8486 0, "Debug control to simulate unexpected attentions"); 8487 8488 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8489 "unexpected_attention_sim_count", 8490 CTLFLAG_RW, &sc->unexpected_attention_sim_count, 8491 0, "Number of simulated unexpected attentions"); 8492 #endif 8493 8494 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8495 "unexpected_attention_count", 8496 CTLFLAG_RW, &sc->unexpected_attention_count, 8497 0, "Number of unexpected attentions"); 8498 8499 #ifdef BCE_DEBUG 8500 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
8501 "debug_bootcode_running_failure", 8502 CTLFLAG_RW, &bootcode_running_failure_sim_control, 8503 0, "Debug control to force bootcode running failures"); 8504 8505 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8506 "rx_low_watermark", 8507 CTLFLAG_RD, &sc->rx_low_watermark, 8508 0, "Lowest level of free rx_bd's"); 8509 8510 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8511 "rx_empty_count", 8512 CTLFLAG_RD, &sc->rx_empty_count, 8513 0, "Number of times the RX chain was empty"); 8514 8515 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8516 "tx_hi_watermark", 8517 CTLFLAG_RD, &sc->tx_hi_watermark, 8518 0, "Highest level of used tx_bd's"); 8519 8520 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8521 "tx_full_count", 8522 CTLFLAG_RD, &sc->tx_full_count, 8523 0, "Number of times the TX chain was full"); 8524 8525 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8526 "tso_frames_requested", 8527 CTLFLAG_RD, &sc->tso_frames_requested, 8528 0, "Number of TSO frames requested"); 8529 8530 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8531 "tso_frames_completed", 8532 CTLFLAG_RD, &sc->tso_frames_completed, 8533 0, "Number of TSO frames completed"); 8534 8535 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8536 "tso_frames_failed", 8537 CTLFLAG_RD, &sc->tso_frames_failed, 8538 0, "Number of TSO frames failed"); 8539 8540 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8541 "csum_offload_ip", 8542 CTLFLAG_RD, &sc->csum_offload_ip, 8543 0, "Number of IP checksum offload frames"); 8544 8545 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8546 "csum_offload_tcp_udp", 8547 CTLFLAG_RD, &sc->csum_offload_tcp_udp, 8548 0, "Number of TCP/UDP checksum offload frames"); 8549 8550 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8551 "vlan_tagged_frames_rcvd", 8552 CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd, 8553 0, "Number of VLAN tagged frames received"); 8554 8555 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 8556 "vlan_tagged_frames_stripped", 8557 CTLFLAG_RD, &sc->vlan_tagged_frames_stripped, 8558 0, "Number of VLAN tagged frames stripped"); 8559 8560 
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8561 "interrupts_rx", 8562 CTLFLAG_RD, &sc->interrupts_rx, 8563 0, "Number of RX interrupts"); 8564 8565 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8566 "interrupts_tx", 8567 CTLFLAG_RD, &sc->interrupts_tx, 8568 0, "Number of TX interrupts"); 8569 #endif 8570 8571 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8572 "stat_IfHcInOctets", 8573 CTLFLAG_RD, &sc->stat_IfHCInOctets, 8574 "Bytes received"); 8575 8576 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8577 "stat_IfHCInBadOctets", 8578 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 8579 "Bad bytes received"); 8580 8581 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8582 "stat_IfHCOutOctets", 8583 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 8584 "Bytes sent"); 8585 8586 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8587 "stat_IfHCOutBadOctets", 8588 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 8589 "Bad bytes sent"); 8590 8591 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8592 "stat_IfHCInUcastPkts", 8593 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 8594 "Unicast packets received"); 8595 8596 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8597 "stat_IfHCInMulticastPkts", 8598 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 8599 "Multicast packets received"); 8600 8601 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8602 "stat_IfHCInBroadcastPkts", 8603 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 8604 "Broadcast packets received"); 8605 8606 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8607 "stat_IfHCOutUcastPkts", 8608 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 8609 "Unicast packets sent"); 8610 8611 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8612 "stat_IfHCOutMulticastPkts", 8613 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 8614 "Multicast packets sent"); 8615 8616 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 8617 "stat_IfHCOutBroadcastPkts", 8618 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 8619 "Broadcast packets sent"); 8620 8621 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8622 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 8623 CTLFLAG_RD, 
&sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 8624 0, "Internal MAC transmit errors"); 8625 8626 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8627 "stat_Dot3StatsCarrierSenseErrors", 8628 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 8629 0, "Carrier sense errors"); 8630 8631 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8632 "stat_Dot3StatsFCSErrors", 8633 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 8634 0, "Frame check sequence errors"); 8635 8636 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8637 "stat_Dot3StatsAlignmentErrors", 8638 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 8639 0, "Alignment errors"); 8640 8641 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8642 "stat_Dot3StatsSingleCollisionFrames", 8643 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 8644 0, "Single Collision Frames"); 8645 8646 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8647 "stat_Dot3StatsMultipleCollisionFrames", 8648 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 8649 0, "Multiple Collision Frames"); 8650 8651 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8652 "stat_Dot3StatsDeferredTransmissions", 8653 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 8654 0, "Deferred Transmissions"); 8655 8656 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8657 "stat_Dot3StatsExcessiveCollisions", 8658 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 8659 0, "Excessive Collisions"); 8660 8661 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8662 "stat_Dot3StatsLateCollisions", 8663 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 8664 0, "Late Collisions"); 8665 8666 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8667 "stat_EtherStatsCollisions", 8668 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 8669 0, "Collisions"); 8670 8671 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8672 "stat_EtherStatsFragments", 8673 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 8674 0, "Fragments"); 8675 8676 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8677 "stat_EtherStatsJabbers", 8678 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 8679 0, 
"Jabbers"); 8680 8681 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8682 "stat_EtherStatsUndersizePkts", 8683 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 8684 0, "Undersize packets"); 8685 8686 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8687 "stat_EtherStatsOversizePkts", 8688 CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts, 8689 0, "stat_EtherStatsOversizePkts"); 8690 8691 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8692 "stat_EtherStatsPktsRx64Octets", 8693 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 8694 0, "Bytes received in 64 byte packets"); 8695 8696 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8697 "stat_EtherStatsPktsRx65Octetsto127Octets", 8698 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 8699 0, "Bytes received in 65 to 127 byte packets"); 8700 8701 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8702 "stat_EtherStatsPktsRx128Octetsto255Octets", 8703 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 8704 0, "Bytes received in 128 to 255 byte packets"); 8705 8706 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8707 "stat_EtherStatsPktsRx256Octetsto511Octets", 8708 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 8709 0, "Bytes received in 256 to 511 byte packets"); 8710 8711 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8712 "stat_EtherStatsPktsRx512Octetsto1023Octets", 8713 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 8714 0, "Bytes received in 512 to 1023 byte packets"); 8715 8716 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8717 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 8718 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 8719 0, "Bytes received in 1024 t0 1522 byte packets"); 8720 8721 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8722 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 8723 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 8724 0, "Bytes received in 1523 to 9022 byte packets"); 8725 8726 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8727 "stat_EtherStatsPktsTx64Octets", 8728 CTLFLAG_RD, 
&sc->stat_EtherStatsPktsTx64Octets, 8729 0, "Bytes sent in 64 byte packets"); 8730 8731 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8732 "stat_EtherStatsPktsTx65Octetsto127Octets", 8733 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 8734 0, "Bytes sent in 65 to 127 byte packets"); 8735 8736 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8737 "stat_EtherStatsPktsTx128Octetsto255Octets", 8738 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 8739 0, "Bytes sent in 128 to 255 byte packets"); 8740 8741 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8742 "stat_EtherStatsPktsTx256Octetsto511Octets", 8743 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 8744 0, "Bytes sent in 256 to 511 byte packets"); 8745 8746 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8747 "stat_EtherStatsPktsTx512Octetsto1023Octets", 8748 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 8749 0, "Bytes sent in 512 to 1023 byte packets"); 8750 8751 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8752 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 8753 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 8754 0, "Bytes sent in 1024 to 1522 byte packets"); 8755 8756 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8757 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 8758 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 8759 0, "Bytes sent in 1523 to 9022 byte packets"); 8760 8761 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8762 "stat_XonPauseFramesReceived", 8763 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 8764 0, "XON pause frames receved"); 8765 8766 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8767 "stat_XoffPauseFramesReceived", 8768 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 8769 0, "XOFF pause frames received"); 8770 8771 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8772 "stat_OutXonSent", 8773 CTLFLAG_RD, &sc->stat_OutXonSent, 8774 0, "XON pause frames sent"); 8775 8776 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8777 "stat_OutXoffSent", 8778 CTLFLAG_RD, 
&sc->stat_OutXoffSent, 8779 0, "XOFF pause frames sent"); 8780 8781 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8782 "stat_FlowControlDone", 8783 CTLFLAG_RD, &sc->stat_FlowControlDone, 8784 0, "Flow control done"); 8785 8786 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8787 "stat_MacControlFramesReceived", 8788 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 8789 0, "MAC control frames received"); 8790 8791 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8792 "stat_XoffStateEntered", 8793 CTLFLAG_RD, &sc->stat_XoffStateEntered, 8794 0, "XOFF state entered"); 8795 8796 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8797 "stat_IfInFramesL2FilterDiscards", 8798 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 8799 0, "Received L2 packets discarded"); 8800 8801 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8802 "stat_IfInRuleCheckerDiscards", 8803 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 8804 0, "Received packets discarded by rule"); 8805 8806 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8807 "stat_IfInFTQDiscards", 8808 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 8809 0, "Received packet FTQ discards"); 8810 8811 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8812 "stat_IfInMBUFDiscards", 8813 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 8814 0, "Received packets discarded due to lack " 8815 "of controller buffer memory"); 8816 8817 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8818 "stat_IfInRuleCheckerP4Hit", 8819 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 8820 0, "Received packets rule checker hits"); 8821 8822 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8823 "stat_CatchupInRuleCheckerDiscards", 8824 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 8825 0, "Received packets discarded in Catchup path"); 8826 8827 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8828 "stat_CatchupInFTQDiscards", 8829 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 8830 0, "Received packets discarded in FTQ in Catchup path"); 8831 8832 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8833 "stat_CatchupInMBUFDiscards", 8834 CTLFLAG_RD, 
&sc->stat_CatchupInMBUFDiscards, 8835 0, "Received packets discarded in controller " 8836 "buffer memory in Catchup path"); 8837 8838 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8839 "stat_CatchupInRuleCheckerP4Hit", 8840 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 8841 0, "Received packets rule checker hits in Catchup path"); 8842 8843 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 8844 "com_no_buffers", 8845 CTLFLAG_RD, &sc->com_no_buffers, 8846 0, "Valid packets received but no RX buffers available"); 8847 8848 #ifdef BCE_DEBUG 8849 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8850 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 8851 (void *)sc, 0, 8852 bce_sysctl_driver_state, "I", "Drive state information"); 8853 8854 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8855 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 8856 (void *)sc, 0, 8857 bce_sysctl_hw_state, "I", "Hardware state information"); 8858 8859 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8860 "status_block", CTLTYPE_INT | CTLFLAG_RW, 8861 (void *)sc, 0, 8862 bce_sysctl_status_block, "I", "Dump status block"); 8863 8864 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8865 "stats_block", CTLTYPE_INT | CTLFLAG_RW, 8866 (void *)sc, 0, 8867 bce_sysctl_stats_block, "I", "Dump statistics block"); 8868 8869 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8870 "stats_clear", CTLTYPE_INT | CTLFLAG_RW, 8871 (void *)sc, 0, 8872 bce_sysctl_stats_clear, "I", "Clear statistics block"); 8873 8874 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8875 "bc_state", CTLTYPE_INT | CTLFLAG_RW, 8876 (void *)sc, 0, 8877 bce_sysctl_bc_state, "I", "Bootcode state information"); 8878 8879 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8880 "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW, 8881 (void *)sc, 0, 8882 bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain"); 8883 8884 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8885 "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW, 8886 (void *)sc, 0, 8887 bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain"); 8888 8889 SYSCTL_ADD_PROC(ctx, 
children, OID_AUTO, 8890 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 8891 (void *)sc, 0, 8892 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 8893 8894 #ifdef BCE_JUMBO_HDRSPLIT 8895 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8896 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW, 8897 (void *)sc, 0, 8898 bce_sysctl_dump_pg_chain, "I", "Dump page chain"); 8899 #endif 8900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8901 "dump_ctx", CTLTYPE_INT | CTLFLAG_RW, 8902 (void *)sc, 0, 8903 bce_sysctl_dump_ctx, "I", "Dump context memory"); 8904 8905 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8906 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 8907 (void *)sc, 0, 8908 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 8909 8910 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8911 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 8912 (void *)sc, 0, 8913 bce_sysctl_reg_read, "I", "Register read"); 8914 8915 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8916 "nvram_read", CTLTYPE_INT | CTLFLAG_RW, 8917 (void *)sc, 0, 8918 bce_sysctl_nvram_read, "I", "NVRAM read"); 8919 8920 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 8921 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 8922 (void *)sc, 0, 8923 bce_sysctl_phy_read, "I", "PHY register read"); 8924 8925 #endif 8926 8927 DBEXIT(BCE_VERBOSE_MISC); 8928 } 8929 8930 8931 /****************************************************************************/ 8932 /* BCE Debug Routines */ 8933 /****************************************************************************/ 8934 #ifdef BCE_DEBUG 8935 8936 /****************************************************************************/ 8937 /* Freezes the controller to allow for a cohesive state dump. */ 8938 /* */ 8939 /* Returns: */ 8940 /* Nothing. 
*/ 8941 /****************************************************************************/ 8942 static __attribute__ ((noinline)) void 8943 bce_freeze_controller(struct bce_softc *sc) 8944 { 8945 u32 val; 8946 val = REG_RD(sc, BCE_MISC_COMMAND); 8947 val |= BCE_MISC_COMMAND_DISABLE_ALL; 8948 REG_WR(sc, BCE_MISC_COMMAND, val); 8949 } 8950 8951 8952 /****************************************************************************/ 8953 /* Unfreezes the controller after a freeze operation. This may not always */ 8954 /* work and the controller will require a reset! */ 8955 /* */ 8956 /* Returns: */ 8957 /* Nothing. */ 8958 /****************************************************************************/ 8959 static __attribute__ ((noinline)) void 8960 bce_unfreeze_controller(struct bce_softc *sc) 8961 { 8962 u32 val; 8963 val = REG_RD(sc, BCE_MISC_COMMAND); 8964 val |= BCE_MISC_COMMAND_ENABLE_ALL; 8965 REG_WR(sc, BCE_MISC_COMMAND, val); 8966 } 8967 8968 8969 /****************************************************************************/ 8970 /* Prints out Ethernet frame information from an mbuf. */ 8971 /* */ 8972 /* Partially decode an Ethernet frame to look at some important headers. */ 8973 /* */ 8974 /* Returns: */ 8975 /* Nothing. */ 8976 /****************************************************************************/ 8977 static __attribute__ ((noinline)) void 8978 bce_dump_enet(struct bce_softc *sc, struct mbuf *m) 8979 { 8980 struct ether_vlan_header *eh; 8981 u16 etype; 8982 int ehlen; 8983 struct ip *ip; 8984 struct tcphdr *th; 8985 struct udphdr *uh; 8986 struct arphdr *ah; 8987 8988 BCE_PRINTF( 8989 "-----------------------------" 8990 " Frame Decode " 8991 "-----------------------------\n"); 8992 8993 eh = mtod(m, struct ether_vlan_header *); 8994 8995 /* Handle VLAN encapsulation if present. 
*/ 8996 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 8997 etype = ntohs(eh->evl_proto); 8998 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 8999 } else { 9000 etype = ntohs(eh->evl_encap_proto); 9001 ehlen = ETHER_HDR_LEN; 9002 } 9003 9004 /* ToDo: Add VLAN output. */ 9005 BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n", 9006 eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen); 9007 9008 switch (etype) { 9009 case ETHERTYPE_IP: 9010 ip = (struct ip *)(m->m_data + ehlen); 9011 BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, " 9012 "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n", 9013 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr), 9014 ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum)); 9015 9016 switch (ip->ip_p) { 9017 case IPPROTO_TCP: 9018 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 9019 BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = " 9020 "%d bytes, flags = 0x%b, csum = 0x%04X\n", 9021 ntohs(th->th_dport), ntohs(th->th_sport), 9022 (th->th_off << 2), th->th_flags, 9023 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST" 9024 "\02SYN\01FIN", ntohs(th->th_sum)); 9025 break; 9026 case IPPROTO_UDP: 9027 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 9028 BCE_PRINTF("-udp: dest = %d, src = %d, len = %d " 9029 "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport), 9030 ntohs(uh->uh_sport), ntohs(uh->uh_ulen), 9031 ntohs(uh->uh_sum)); 9032 break; 9033 case IPPROTO_ICMP: 9034 BCE_PRINTF("icmp:\n"); 9035 break; 9036 default: 9037 BCE_PRINTF("----: Other IP protocol.\n"); 9038 } 9039 break; 9040 case ETHERTYPE_IPV6: 9041 BCE_PRINTF("ipv6: No decode supported.\n"); 9042 break; 9043 case ETHERTYPE_ARP: 9044 BCE_PRINTF("-arp: "); 9045 ah = (struct arphdr *) (m->m_data + ehlen); 9046 switch (ntohs(ah->ar_op)) { 9047 case ARPOP_REVREQUEST: 9048 printf("reverse ARP request\n"); 9049 break; 9050 case ARPOP_REVREPLY: 9051 printf("reverse ARP reply\n"); 9052 break; 9053 case ARPOP_REQUEST: 9054 printf("ARP request\n"); 9055 break; 9056 case 
ARPOP_REPLY: 9057 printf("ARP reply\n"); 9058 break; 9059 default: 9060 printf("other ARP operation\n"); 9061 } 9062 break; 9063 default: 9064 BCE_PRINTF("----: Other protocol.\n"); 9065 } 9066 9067 BCE_PRINTF( 9068 "-----------------------------" 9069 "--------------" 9070 "-----------------------------\n"); 9071 } 9072 9073 9074 /****************************************************************************/ 9075 /* Prints out information about an mbuf. */ 9076 /* */ 9077 /* Returns: */ 9078 /* Nothing. */ 9079 /****************************************************************************/ 9080 static __attribute__ ((noinline)) void 9081 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) 9082 { 9083 struct mbuf *mp = m; 9084 9085 if (m == NULL) { 9086 BCE_PRINTF("mbuf: null pointer\n"); 9087 return; 9088 } 9089 9090 while (mp) { 9091 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, " 9092 "m_data = %p\n", mp, mp->m_len, mp->m_flags, 9093 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data); 9094 9095 if (mp->m_flags & M_PKTHDR) { 9096 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, " 9097 "csum_flags = %b\n", mp->m_pkthdr.len, 9098 mp->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG" 9099 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" 9100 "\22M_PROMISC\23M_NOFREE", 9101 mp->m_pkthdr.csum_flags, 9102 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" 9103 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" 9104 "\12CSUM_IP_VALID\13CSUM_DATA_VALID" 9105 "\14CSUM_PSEUDO_HDR"); 9106 } 9107 9108 if (mp->m_flags & M_EXT) { 9109 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ", 9110 mp->m_ext.ext_buf, mp->m_ext.ext_size); 9111 switch (mp->m_ext.ext_type) { 9112 case EXT_CLUSTER: 9113 printf("EXT_CLUSTER\n"); break; 9114 case EXT_SFBUF: 9115 printf("EXT_SFBUF\n"); break; 9116 case EXT_JUMBO9: 9117 printf("EXT_JUMBO9\n"); break; 9118 case EXT_JUMBO16: 9119 printf("EXT_JUMBO16\n"); break; 9120 case EXT_PACKET: 9121 printf("EXT_PACKET\n"); break; 9122 case EXT_MBUF: 9123 
printf("EXT_MBUF\n"); break; 9124 case EXT_NET_DRV: 9125 printf("EXT_NET_DRV\n"); break; 9126 case EXT_MOD_TYPE: 9127 printf("EXT_MDD_TYPE\n"); break; 9128 case EXT_DISPOSABLE: 9129 printf("EXT_DISPOSABLE\n"); break; 9130 case EXT_EXTREF: 9131 printf("EXT_EXTREF\n"); break; 9132 default: 9133 printf("UNKNOWN\n"); 9134 } 9135 } 9136 9137 mp = mp->m_next; 9138 } 9139 } 9140 9141 9142 /****************************************************************************/ 9143 /* Prints out the mbufs in the TX mbuf chain. */ 9144 /* */ 9145 /* Returns: */ 9146 /* Nothing. */ 9147 /****************************************************************************/ 9148 static __attribute__ ((noinline)) void 9149 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9150 { 9151 struct mbuf *m; 9152 9153 BCE_PRINTF( 9154 "----------------------------" 9155 " tx mbuf data " 9156 "----------------------------\n"); 9157 9158 for (int i = 0; i < count; i++) { 9159 m = sc->tx_mbuf_ptr[chain_prod]; 9160 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod); 9161 bce_dump_mbuf(sc, m); 9162 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 9163 } 9164 9165 BCE_PRINTF( 9166 "----------------------------" 9167 "----------------" 9168 "----------------------------\n"); 9169 } 9170 9171 9172 /****************************************************************************/ 9173 /* Prints out the mbufs in the RX mbuf chain. */ 9174 /* */ 9175 /* Returns: */ 9176 /* Nothing. 
*/ 9177 /****************************************************************************/ 9178 static __attribute__ ((noinline)) void 9179 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9180 { 9181 struct mbuf *m; 9182 9183 BCE_PRINTF( 9184 "----------------------------" 9185 " rx mbuf data " 9186 "----------------------------\n"); 9187 9188 for (int i = 0; i < count; i++) { 9189 m = sc->rx_mbuf_ptr[chain_prod]; 9190 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); 9191 bce_dump_mbuf(sc, m); 9192 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 9193 } 9194 9195 9196 BCE_PRINTF( 9197 "----------------------------" 9198 "----------------" 9199 "----------------------------\n"); 9200 } 9201 9202 9203 #ifdef BCE_JUMBO_HDRSPLIT 9204 /****************************************************************************/ 9205 /* Prints out the mbufs in the mbuf page chain. */ 9206 /* */ 9207 /* Returns: */ 9208 /* Nothing. */ 9209 /****************************************************************************/ 9210 static __attribute__ ((noinline)) void 9211 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) 9212 { 9213 struct mbuf *m; 9214 9215 BCE_PRINTF( 9216 "----------------------------" 9217 " pg mbuf data " 9218 "----------------------------\n"); 9219 9220 for (int i = 0; i < count; i++) { 9221 m = sc->pg_mbuf_ptr[chain_prod]; 9222 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); 9223 bce_dump_mbuf(sc, m); 9224 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); 9225 } 9226 9227 9228 BCE_PRINTF( 9229 "----------------------------" 9230 "----------------" 9231 "----------------------------\n"); 9232 } 9233 #endif 9234 9235 9236 /****************************************************************************/ 9237 /* Prints out a tx_bd structure. */ 9238 /* */ 9239 /* Returns: */ 9240 /* Nothing. 
*/ 9241 /****************************************************************************/ 9242 static __attribute__ ((noinline)) void 9243 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 9244 { 9245 int i = 0; 9246 9247 if (idx > MAX_TX_BD) 9248 /* Index out of range. */ 9249 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 9250 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 9251 /* TX Chain page pointer. */ 9252 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9253 "pointer\n", idx, txbd->tx_bd_haddr_hi, 9254 txbd->tx_bd_haddr_lo); 9255 else { 9256 /* Normal tx_bd entry. */ 9257 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 9258 "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = " 9259 "0x%04X (", idx, txbd->tx_bd_haddr_hi, 9260 txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes, 9261 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); 9262 9263 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) { 9264 if (i>0) 9265 printf("|"); 9266 printf("CONN_FAULT"); 9267 i++; 9268 } 9269 9270 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) { 9271 if (i>0) 9272 printf("|"); 9273 printf("TCP_UDP_CKSUM"); 9274 i++; 9275 } 9276 9277 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) { 9278 if (i>0) 9279 printf("|"); 9280 printf("IP_CKSUM"); 9281 i++; 9282 } 9283 9284 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) { 9285 if (i>0) 9286 printf("|"); 9287 printf("VLAN"); 9288 i++; 9289 } 9290 9291 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) { 9292 if (i>0) 9293 printf("|"); 9294 printf("COAL_NOW"); 9295 i++; 9296 } 9297 9298 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) { 9299 if (i>0) 9300 printf("|"); 9301 printf("DONT_GEN_CRC"); 9302 i++; 9303 } 9304 9305 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) { 9306 if (i>0) 9307 printf("|"); 9308 printf("START"); 9309 i++; 9310 } 9311 9312 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) { 9313 if (i>0) 9314 printf("|"); 9315 printf("END"); 9316 i++; 9317 } 9318 9319 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) 
{ 9320 if (i>0) 9321 printf("|"); 9322 printf("LSO"); 9323 i++; 9324 } 9325 9326 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) { 9327 if (i>0) 9328 printf("|"); 9329 printf("SW_OPTION=%d", ((txbd->tx_bd_flags & 9330 TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++; 9331 } 9332 9333 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) { 9334 if (i>0) 9335 printf("|"); 9336 printf("SW_FLAGS"); 9337 i++; 9338 } 9339 9340 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) { 9341 if (i>0) 9342 printf("|"); 9343 printf("SNAP)"); 9344 } else { 9345 printf(")\n"); 9346 } 9347 } 9348 } 9349 9350 9351 /****************************************************************************/ 9352 /* Prints out a rx_bd structure. */ 9353 /* */ 9354 /* Returns: */ 9355 /* Nothing. */ 9356 /****************************************************************************/ 9357 static __attribute__ ((noinline)) void 9358 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 9359 { 9360 if (idx > MAX_RX_BD) 9361 /* Index out of range. */ 9362 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 9363 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 9364 /* RX Chain page pointer. */ 9365 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 9366 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 9367 rxbd->rx_bd_haddr_lo); 9368 else 9369 /* Normal rx_bd entry. */ 9370 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 9371 "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi, 9372 rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len, 9373 rxbd->rx_bd_flags); 9374 } 9375 9376 9377 #ifdef BCE_JUMBO_HDRSPLIT 9378 /****************************************************************************/ 9379 /* Prints out a rx_bd structure in the page chain. */ 9380 /* */ 9381 /* Returns: */ 9382 /* Nothing. 
*/ 9383 /****************************************************************************/ 9384 static __attribute__ ((noinline)) void 9385 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) 9386 { 9387 if (idx > MAX_PG_BD) 9388 /* Index out of range. */ 9389 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); 9390 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) 9391 /* Page Chain page pointer. */ 9392 BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 9393 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); 9394 else 9395 /* Normal rx_bd entry. */ 9396 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " 9397 "flags = 0x%08X\n", idx, 9398 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, 9399 pgbd->rx_bd_len, pgbd->rx_bd_flags); 9400 } 9401 #endif 9402 9403 9404 /****************************************************************************/ 9405 /* Prints out a l2_fhdr structure. */ 9406 /* */ 9407 /* Returns: */ 9408 /* Nothing. */ 9409 /****************************************************************************/ 9410 static __attribute__ ((noinline)) void 9411 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 9412 { 9413 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " 9414 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " 9415 "tcp_udp_xsum = 0x%04X\n", idx, 9416 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, 9417 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 9418 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 9419 } 9420 9421 9422 /****************************************************************************/ 9423 /* Prints out context memory info. (Only useful for CID 0 to 16.) */ 9424 /* */ 9425 /* Returns: */ 9426 /* Nothing. 
*/ 9427 /****************************************************************************/ 9428 static __attribute__ ((noinline)) void 9429 bce_dump_ctx(struct bce_softc *sc, u16 cid) 9430 { 9431 if (cid > TX_CID) { 9432 BCE_PRINTF(" Unknown CID\n"); 9433 return; 9434 } 9435 9436 BCE_PRINTF( 9437 "----------------------------" 9438 " CTX Data " 9439 "----------------------------\n"); 9440 9441 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); 9442 9443 if (cid == RX_CID) { 9444 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx " 9445 "producer index\n", 9446 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX)); 9447 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host " 9448 "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 9449 BCE_L2CTX_RX_HOST_BSEQ)); 9450 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n", 9451 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ)); 9452 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer " 9453 "descriptor address\n", 9454 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI)); 9455 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer " 9456 "descriptor address\n", 9457 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO)); 9458 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer " 9459 "index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9460 BCE_L2CTX_RX_NX_BDIDX)); 9461 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page " 9462 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9463 BCE_L2CTX_RX_HOST_PG_BDIDX)); 9464 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page " 9465 "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid), 9466 BCE_L2CTX_RX_PG_BUF_SIZE)); 9467 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page " 9468 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 9469 BCE_L2CTX_RX_NX_PG_BDHADDR_HI)); 9470 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page " 9471 "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), 9472 BCE_L2CTX_RX_NX_PG_BDHADDR_LO)); 9473 BCE_PRINTF(" 0x%08X - 
(L2CTX_RX_NX_PG_BDIDX) h/w page " 9474 "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9475 BCE_L2CTX_RX_NX_PG_BDIDX)); 9476 } else if (cid == TX_CID) { 9477 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9478 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 9479 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n", 9480 CTX_RD(sc, GET_CID_ADDR(cid), 9481 BCE_L2CTX_TX_TYPE_XI)); 9482 BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx " 9483 "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), 9484 BCE_L2CTX_TX_CMD_TYPE_XI)); 9485 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) " 9486 "h/w buffer descriptor address\n", 9487 CTX_RD(sc, GET_CID_ADDR(cid), 9488 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI)); 9489 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) " 9490 "h/w buffer descriptor address\n", 9491 CTX_RD(sc, GET_CID_ADDR(cid), 9492 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI)); 9493 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) " 9494 "host producer index\n", 9495 CTX_RD(sc, GET_CID_ADDR(cid), 9496 BCE_L2CTX_TX_HOST_BIDX_XI)); 9497 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) " 9498 "host byte sequence\n", 9499 CTX_RD(sc, GET_CID_ADDR(cid), 9500 BCE_L2CTX_TX_HOST_BSEQ_XI)); 9501 } else { 9502 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n", 9503 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE)); 9504 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n", 9505 CTX_RD(sc, GET_CID_ADDR(cid), 9506 BCE_L2CTX_TX_CMD_TYPE)); 9507 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) " 9508 "h/w buffer descriptor address\n", 9509 CTX_RD(sc, GET_CID_ADDR(cid), 9510 BCE_L2CTX_TX_TBDR_BHADDR_HI)); 9511 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) " 9512 "h/w buffer descriptor address\n", 9513 CTX_RD(sc, GET_CID_ADDR(cid), 9514 BCE_L2CTX_TX_TBDR_BHADDR_LO)); 9515 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host " 9516 "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), 9517 BCE_L2CTX_TX_HOST_BIDX)); 9518 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte " 9519 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), 9520 
BCE_L2CTX_TX_HOST_BSEQ)); 9521 } 9522 } 9523 9524 BCE_PRINTF( 9525 "----------------------------" 9526 " Raw CTX " 9527 "----------------------------\n"); 9528 9529 for (int i = 0x0; i < 0x300; i += 0x10) { 9530 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 9531 CTX_RD(sc, GET_CID_ADDR(cid), i), 9532 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4), 9533 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8), 9534 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc)); 9535 } 9536 9537 9538 BCE_PRINTF( 9539 "----------------------------" 9540 "----------------" 9541 "----------------------------\n"); 9542 } 9543 9544 9545 /****************************************************************************/ 9546 /* Prints out the FTQ data. */ 9547 /* */ 9548 /* Returns: */ 9549 /* Nothing. */ 9550 /****************************************************************************/ 9551 static __attribute__ ((noinline)) void 9552 bce_dump_ftqs(struct bce_softc *sc) 9553 { 9554 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val; 9555 9556 BCE_PRINTF( 9557 "----------------------------" 9558 " FTQ Data " 9559 "----------------------------\n"); 9560 9561 BCE_PRINTF(" FTQ Command Control Depth_Now " 9562 "Max_Depth Valid_Cnt \n"); 9563 BCE_PRINTF(" ------- ---------- ---------- ---------- " 9564 "---------- ----------\n"); 9565 9566 /* Setup the generic statistic counters for the FTQ valid count. 
*/ 9567 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | 9568 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | 9569 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | 9570 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT); 9571 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 9572 9573 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | 9574 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | 9575 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | 9576 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT); 9577 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val); 9578 9579 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | 9580 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | 9581 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | 9582 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT); 9583 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val); 9584 9585 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | 9586 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | 9587 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | 9588 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT); 9589 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val); 9590 9591 /* Input queue to the Receive Lookup state machine */ 9592 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); 9593 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); 9594 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; 9595 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; 9596 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 9597 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9598 cmd, ctl, cur_depth, max_depth, valid_cnt); 9599 9600 /* Input queue to the Receive Processor */ 9601 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); 9602 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); 9603 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; 9604 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; 9605 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 9606 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9607 
cmd, ctl, cur_depth, max_depth, valid_cnt); 9608 9609 /* Input queue to the Recevie Processor */ 9610 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); 9611 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); 9612 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; 9613 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; 9614 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 9615 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9616 cmd, ctl, cur_depth, max_depth, valid_cnt); 9617 9618 /* Input queue to the Receive Virtual to Physical state machine */ 9619 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); 9620 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); 9621 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; 9622 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; 9623 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 9624 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9625 cmd, ctl, cur_depth, max_depth, valid_cnt); 9626 9627 /* Input queue to the Recevie Virtual to Physical state machine */ 9628 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); 9629 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); 9630 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; 9631 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; 9632 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); 9633 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9634 cmd, ctl, cur_depth, max_depth, valid_cnt); 9635 9636 /* Input queue to the Receive Virtual to Physical state machine */ 9637 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); 9638 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); 9639 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; 9640 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; 9641 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); 9642 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9643 cmd, ctl, cur_depth, max_depth, valid_cnt); 9644 9645 /* Input queue to the Receive DMA state machine */ 9646 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); 9647 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL); 9648 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) 
>> 22; 9649 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; 9650 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); 9651 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9652 cmd, ctl, cur_depth, max_depth, valid_cnt); 9653 9654 /* Input queue to the Transmit Scheduler state machine */ 9655 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); 9656 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); 9657 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; 9658 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; 9659 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7); 9660 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9661 cmd, ctl, cur_depth, max_depth, valid_cnt); 9662 9663 /* Input queue to the Transmit Buffer Descriptor state machine */ 9664 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); 9665 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); 9666 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; 9667 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; 9668 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); 9669 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9670 cmd, ctl, cur_depth, max_depth, valid_cnt); 9671 9672 /* Input queue to the Transmit Processor */ 9673 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); 9674 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); 9675 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; 9676 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; 9677 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); 9678 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9679 cmd, ctl, cur_depth, max_depth, valid_cnt); 9680 9681 /* Input queue to the Transmit DMA state machine */ 9682 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); 9683 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); 9684 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; 9685 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; 9686 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); 9687 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9688 cmd, ctl, cur_depth, max_depth, valid_cnt); 9689 9690 /* Input queue to the Transmit Patch-Up 
Processor */ 9691 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); 9692 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); 9693 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; 9694 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; 9695 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); 9696 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9697 cmd, ctl, cur_depth, max_depth, valid_cnt); 9698 9699 /* Input queue to the Transmit Assembler state machine */ 9700 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); 9701 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); 9702 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; 9703 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; 9704 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); 9705 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9706 cmd, ctl, cur_depth, max_depth, valid_cnt); 9707 9708 /* Input queue to the Completion Processor */ 9709 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); 9710 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); 9711 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; 9712 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; 9713 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); 9714 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9715 cmd, ctl, cur_depth, max_depth, valid_cnt); 9716 9717 /* Input queue to the Completion Processor */ 9718 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); 9719 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); 9720 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; 9721 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; 9722 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); 9723 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9724 cmd, ctl, cur_depth, max_depth, valid_cnt); 9725 9726 /* Input queue to the Completion Processor */ 9727 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); 9728 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); 9729 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; 9730 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; 
9731 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); 9732 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9733 cmd, ctl, cur_depth, max_depth, valid_cnt); 9734 9735 /* Setup the generic statistic counters for the FTQ valid count. */ 9736 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | 9737 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | 9738 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT); 9739 9740 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9741 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) 9742 val = val | 9743 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 9744 24); 9745 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); 9746 9747 /* Input queue to the Management Control Processor */ 9748 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); 9749 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); 9750 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; 9751 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; 9752 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); 9753 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9754 cmd, ctl, cur_depth, max_depth, valid_cnt); 9755 9756 /* Input queue to the Command Processor */ 9757 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); 9758 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); 9759 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; 9760 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; 9761 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); 9762 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9763 cmd, ctl, cur_depth, max_depth, valid_cnt); 9764 9765 /* Input queue to the Completion Scheduler state machine */ 9766 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); 9767 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); 9768 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; 9769 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; 9770 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); 9771 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9772 cmd, ctl, cur_depth, max_depth, valid_cnt); 9773 9774 if 
((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) || 9775 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) { 9776 /* Input queue to the RV2P Command Scheduler */ 9777 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD); 9778 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL); 9779 cur_depth = (ctl & 0xFFC00000) >> 22; 9780 max_depth = (ctl & 0x003FF000) >> 12; 9781 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); 9782 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", 9783 cmd, ctl, cur_depth, max_depth, valid_cnt); 9784 } 9785 9786 BCE_PRINTF( 9787 "----------------------------" 9788 "----------------" 9789 "----------------------------\n"); 9790 } 9791 9792 9793 /****************************************************************************/ 9794 /* Prints out the TX chain. */ 9795 /* */ 9796 /* Returns: */ 9797 /* Nothing. */ 9798 /****************************************************************************/ 9799 static __attribute__ ((noinline)) void 9800 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) 9801 { 9802 struct tx_bd *txbd; 9803 9804 /* First some info about the tx_bd chain structure. */ 9805 BCE_PRINTF( 9806 "----------------------------" 9807 " tx_bd chain " 9808 "----------------------------\n"); 9809 9810 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", 9811 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES); 9812 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 9813 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); 9814 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD); 9815 9816 BCE_PRINTF( 9817 "----------------------------" 9818 " tx_bd data " 9819 "----------------------------\n"); 9820 9821 /* Now print out a decoded list of TX buffer descriptors. 
*/ 9822 for (int i = 0; i < count; i++) { 9823 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 9824 bce_dump_txbd(sc, tx_prod, txbd); 9825 tx_prod++; 9826 } 9827 9828 BCE_PRINTF( 9829 "----------------------------" 9830 "----------------" 9831 "----------------------------\n"); 9832 } 9833 9834 9835 /****************************************************************************/ 9836 /* Prints out the RX chain. */ 9837 /* */ 9838 /* Returns: */ 9839 /* Nothing. */ 9840 /****************************************************************************/ 9841 static __attribute__ ((noinline)) void 9842 bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count) 9843 { 9844 struct rx_bd *rxbd; 9845 9846 /* First some info about the rx_bd chain structure. */ 9847 BCE_PRINTF( 9848 "----------------------------" 9849 " rx_bd chain " 9850 "----------------------------\n"); 9851 9852 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", 9853 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES); 9854 9855 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 9856 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); 9857 9858 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD); 9859 9860 BCE_PRINTF( 9861 "----------------------------" 9862 " rx_bd data " 9863 "----------------------------\n"); 9864 9865 /* Now print out the rx_bd's themselves. */ 9866 for (int i = 0; i < count; i++) { 9867 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 9868 bce_dump_rxbd(sc, rx_prod, rxbd); 9869 rx_prod = RX_CHAIN_IDX(rx_prod + 1); 9870 } 9871 9872 BCE_PRINTF( 9873 "----------------------------" 9874 "----------------" 9875 "----------------------------\n"); 9876 } 9877 9878 9879 #ifdef BCE_JUMBO_HDRSPLIT 9880 /****************************************************************************/ 9881 /* Prints out the page chain. */ 9882 /* */ 9883 /* Returns: */ 9884 /* Nothing. 
*/ 9885 /****************************************************************************/ 9886 static __attribute__ ((noinline)) void 9887 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) 9888 { 9889 struct rx_bd *pgbd; 9890 9891 /* First some info about the page chain structure. */ 9892 BCE_PRINTF( 9893 "----------------------------" 9894 " page chain " 9895 "----------------------------\n"); 9896 9897 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", 9898 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES); 9899 9900 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 9901 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); 9902 9903 BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n", 9904 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD); 9905 9906 BCE_PRINTF( 9907 "----------------------------" 9908 " page data " 9909 "----------------------------\n"); 9910 9911 /* Now print out the rx_bd's themselves. */ 9912 for (int i = 0; i < count; i++) { 9913 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; 9914 bce_dump_pgbd(sc, pg_prod, pgbd); 9915 pg_prod = PG_CHAIN_IDX(pg_prod + 1); 9916 } 9917 9918 BCE_PRINTF( 9919 "----------------------------" 9920 "----------------" 9921 "----------------------------\n"); 9922 } 9923 #endif 9924 9925 9926 #define BCE_PRINT_RX_CONS(arg) \ 9927 if (sblk->status_rx_quick_consumer_index##arg) \ 9928 BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \ 9929 sblk->status_rx_quick_consumer_index##arg, (u16) \ 9930 RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \ 9931 arg); 9932 9933 9934 #define BCE_PRINT_TX_CONS(arg) \ 9935 if (sblk->status_tx_quick_consumer_index##arg) \ 9936 BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \ 9937 sblk->status_tx_quick_consumer_index##arg, (u16) \ 9938 TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \ 9939 arg); 9940 9941 /****************************************************************************/ 9942 /* Prints out the status 
block from host memory. */ 9943 /* */ 9944 /* Returns: */ 9945 /* Nothing. */ 9946 /****************************************************************************/ 9947 static __attribute__ ((noinline)) void 9948 bce_dump_status_block(struct bce_softc *sc) 9949 { 9950 struct status_block *sblk; 9951 9952 sblk = sc->status_block; 9953 9954 BCE_PRINTF( 9955 "----------------------------" 9956 " Status Block " 9957 "----------------------------\n"); 9958 9959 /* Theses indices are used for normal L2 drivers. */ 9960 BCE_PRINTF(" 0x%08X - attn_bits\n", 9961 sblk->status_attn_bits); 9962 9963 BCE_PRINTF(" 0x%08X - attn_bits_ack\n", 9964 sblk->status_attn_bits_ack); 9965 9966 BCE_PRINT_RX_CONS(0); 9967 BCE_PRINT_TX_CONS(0) 9968 9969 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); 9970 9971 /* Theses indices are not used for normal L2 drivers. */ 9972 BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3); 9973 BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6); 9974 BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9); 9975 BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12); 9976 BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15); 9977 9978 BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3); 9979 9980 if (sblk->status_completion_producer_index || 9981 sblk->status_cmd_consumer_index) 9982 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", 9983 sblk->status_completion_producer_index, 9984 sblk->status_cmd_consumer_index); 9985 9986 BCE_PRINTF( 9987 "----------------------------" 9988 "----------------" 9989 "----------------------------\n"); 9990 } 9991 9992 9993 #define BCE_PRINT_64BIT_STAT(arg) \ 9994 if (sblk->arg##_lo || sblk->arg##_hi) \ 9995 BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \ 9996 sblk->arg##_lo, #arg); 9997 9998 #define BCE_PRINT_32BIT_STAT(arg) \ 9999 if (sblk->arg) \ 10000 BCE_PRINTF(" 0x%08X : %s\n", \ 10001 sblk->arg, #arg); 10002 10003 
/****************************************************************************/
/* Prints out the statistics block from host memory.                        */
/*                                                                          */
/* Only non-zero counters are printed (the BCE_PRINT_*_STAT macros skip    */
/* zero values).  Print order is fixed and matches the hardware layout.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_stats_block(struct bce_softc *sc)
{
	struct statistics_block *sblk;

	sblk = sc->stats_block;

	BCE_PRINTF(
	    "---------------"
	    " Stats Block (All Stats Not Shown Are 0) "
	    "---------------\n");

	/* 64-bit octet and packet counters. */
	BCE_PRINT_64BIT_STAT(stat_IfHCInOctets);
	BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets);
	BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets);
	BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets);
	BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts);
	BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts);
	BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts);
	BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts);
	BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts);
	BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts);
	/* MAC/link error counters. */
	BCE_PRINT_32BIT_STAT(
	    stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions);
	BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts);
	/* RX/TX packet-size histograms. */
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets);
	BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets);
	/* Pause/flow-control counters. */
	BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived);
	BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived);
	BCE_PRINT_32BIT_STAT(stat_OutXonSent);
	BCE_PRINT_32BIT_STAT(stat_OutXoffSent);
	BCE_PRINT_32BIT_STAT(stat_FlowControlDone);
	BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived);
	BCE_PRINT_32BIT_STAT(stat_XoffStateEntered);
	/* RX filter/discard counters. */
	BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards);
	BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards);
	BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards);
	BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards);
	BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit);
	BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards);
	BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards);
	BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards);
	BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

/****************************************************************************/ 10085 /* Prints out a summary of the driver state. */ 10086 /* */ 10087 /* Returns: */ 10088 /* Nothing. */ 10089 /****************************************************************************/ 10090 static __attribute__ ((noinline)) void 10091 bce_dump_driver_state(struct bce_softc *sc) 10092 { 10093 u32 val_hi, val_lo; 10094 10095 BCE_PRINTF( 10096 "-----------------------------" 10097 " Driver State " 10098 "-----------------------------\n"); 10099 10100 val_hi = BCE_ADDR_HI(sc); 10101 val_lo = BCE_ADDR_LO(sc); 10102 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual " 10103 "address\n", val_hi, val_lo); 10104 10105 val_hi = BCE_ADDR_HI(sc->bce_vhandle); 10106 val_lo = BCE_ADDR_LO(sc->bce_vhandle); 10107 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual " 10108 "address\n", val_hi, val_lo); 10109 10110 val_hi = BCE_ADDR_HI(sc->status_block); 10111 val_lo = BCE_ADDR_LO(sc->status_block); 10112 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block " 10113 "virtual address\n", val_hi, val_lo); 10114 10115 val_hi = BCE_ADDR_HI(sc->stats_block); 10116 val_lo = BCE_ADDR_LO(sc->stats_block); 10117 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block " 10118 "virtual address\n", val_hi, val_lo); 10119 10120 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 10121 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 10122 BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 10123 "virtual adddress\n", val_hi, val_lo); 10124 10125 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 10126 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 10127 BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 10128 "virtual address\n", val_hi, val_lo); 10129 10130 #ifdef BCE_JUMBO_HDRSPLIT 10131 val_hi = BCE_ADDR_HI(sc->pg_bd_chain); 10132 val_lo = BCE_ADDR_LO(sc->pg_bd_chain); 10133 BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain " 10134 "virtual address\n", val_hi, val_lo); 10135 #endif 10136 10137 val_hi = 
BCE_ADDR_HI(sc->tx_mbuf_ptr); 10138 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 10139 BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 10140 "virtual address\n", val_hi, val_lo); 10141 10142 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 10143 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 10144 BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 10145 "virtual address\n", val_hi, val_lo); 10146 10147 #ifdef BCE_JUMBO_HDRSPLIT 10148 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); 10149 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); 10150 BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain " 10151 "virtual address\n", val_hi, val_lo); 10152 #endif 10153 10154 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) " 10155 "h/w intrs\n", sc->interrupts_generated); 10156 10157 BCE_PRINTF(" 0x%08X - (sc->interrupts_rx) " 10158 "rx interrupts handled\n", sc->interrupts_rx); 10159 10160 BCE_PRINTF(" 0x%08X - (sc->interrupts_tx) " 10161 "tx interrupts handled\n", sc->interrupts_tx); 10162 10163 BCE_PRINTF(" 0x%08X - (sc->phy_interrupts) " 10164 "phy interrupts handled\n", sc->phy_interrupts); 10165 10166 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) " 10167 "status block index\n", sc->last_status_idx); 10168 10169 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer " 10170 "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); 10171 10172 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer " 10173 "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); 10174 10175 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer " 10176 "byte seq index\n", sc->tx_prod_bseq); 10177 10178 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx " 10179 "mbufs allocated\n", sc->debug_tx_mbuf_alloc); 10180 10181 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used " 10182 "tx_bd's\n", sc->used_tx_bd); 10183 10184 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi " 10185 "watermark\n", sc->tx_hi_watermark, sc->max_tx_bd); 10186 10187 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer " 10188 "index\n", 
sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); 10189 10190 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer " 10191 "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); 10192 10193 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer " 10194 "byte seq index\n", sc->rx_prod_bseq); 10195 10196 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx " 10197 "mbufs allocated\n", sc->debug_rx_mbuf_alloc); 10198 10199 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free " 10200 "rx_bd's\n", sc->free_rx_bd); 10201 10202 #ifdef BCE_JUMBO_HDRSPLIT 10203 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer " 10204 "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); 10205 10206 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer " 10207 "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); 10208 10209 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page " 10210 "mbufs allocated\n", sc->debug_pg_mbuf_alloc); 10211 10212 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page " 10213 "rx_bd's\n", sc->free_pg_bd); 10214 10215 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low " 10216 "watermark\n", sc->pg_low_watermark, sc->max_pg_bd); 10217 #endif 10218 10219 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) " 10220 "mbuf alloc failures\n", sc->mbuf_alloc_failed_count); 10221 10222 BCE_PRINTF(" 0x%08X - (sc->bce_flags) " 10223 "bce mac flags\n", sc->bce_flags); 10224 10225 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) " 10226 "bce phy flags\n", sc->bce_phy_flags); 10227 10228 BCE_PRINTF( 10229 "----------------------------" 10230 "----------------" 10231 "----------------------------\n"); 10232 } 10233 10234 10235 /****************************************************************************/ 10236 /* Prints out the hardware state through a summary of important register, */ 10237 /* followed by a complete register dump. */ 10238 /* */ 10239 /* Returns: */ 10240 /* Nothing. 
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_hw_state(struct bce_softc *sc)
{
	u32 reg;
	int addr;

	BCE_PRINTF(
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);

	/* Summary of the interesting status registers first. */
	reg = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
	    reg, BCE_MISC_ENABLE_STATUS_BITS);

	reg = REG_RD(sc, BCE_DMA_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n",
	    reg, BCE_DMA_STATUS);

	reg = REG_RD(sc, BCE_CTX_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n",
	    reg, BCE_CTX_STATUS);

	reg = REG_RD(sc, BCE_EMAC_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n",
	    reg, BCE_EMAC_STATUS);

	reg = REG_RD(sc, BCE_RPM_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n",
	    reg, BCE_RPM_STATUS);

	/* ToDo: Create a #define for the RLUP status register offset. */
	reg = REG_RD(sc, 0x2004);
	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n",
	    reg, 0x2004);

	reg = REG_RD(sc, BCE_RV2P_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n",
	    reg, BCE_RV2P_STATUS);

	/* ToDo: Create a #define for the RDMA status register offset. */
	reg = REG_RD(sc, 0x2c04);
	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n",
	    reg, 0x2c04);

	reg = REG_RD(sc, BCE_TBDR_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n",
	    reg, BCE_TBDR_STATUS);

	reg = REG_RD(sc, BCE_TDMA_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n",
	    reg, BCE_TDMA_STATUS);

	reg = REG_RD(sc, BCE_HC_STATUS);
	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n",
	    reg, BCE_HC_STATUS);

	/* On-chip processor state registers (indirect address space). */
	reg = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
	    reg, BCE_TXP_CPU_STATE);

	reg = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
	    reg, BCE_TPAT_CPU_STATE);

	reg = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
	    reg, BCE_RXP_CPU_STATE);

	reg = REG_RD_IND(sc, BCE_COM_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
	    reg, BCE_COM_CPU_STATE);

	reg = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n",
	    reg, BCE_MCP_CPU_STATE);

	reg = REG_RD_IND(sc, BCE_CP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
	    reg, BCE_CP_CPU_STATE);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	BCE_PRINTF(
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	/* Complete dump of the direct register space, 4 words per line. */
	for (addr = 0x400; addr < 0x8000; addr += 0x10) {
		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    addr, REG_RD(sc, addr), REG_RD(sc, addr + 0x4),
		    REG_RD(sc, addr + 0x8), REG_RD(sc, addr + 0xC));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

/****************************************************************************/
/* Prints out the mailbox queue registers.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_mq_regs(struct bce_softc *sc)
{
	int addr;

	BCE_PRINTF(
	    "----------------------------"
	    " MQ Regs "
	    "----------------------------\n");

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	/* Dump the mailbox queue register block, 4 registers per line. */
	for (addr = 0x3c00; addr < 0x4000; addr += 0x10) {
		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    addr, REG_RD(sc, addr), REG_RD(sc, addr + 0x4),
		    REG_RD(sc, addr + 0x8), REG_RD(sc, addr + 0xC));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the bootcode state.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_bc_state(struct bce_softc *sc)
{
	u32 reg;

	BCE_PRINTF(
	    "----------------------------"
	    " Bootcode State "
	    "----------------------------\n");

	/* Bootcode version string captured by the driver at attach time. */
	BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);

	/* The bootcode state words live in the shared memory window. */
	reg = bce_shmem_rd(sc, BCE_BC_RESET_TYPE);
	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
	    reg, BCE_BC_RESET_TYPE);

	reg = bce_shmem_rd(sc, BCE_BC_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) state\n",
	    reg, BCE_BC_STATE);

	reg = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
	    reg, BCE_BC_STATE_CONDITION);

	reg = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD);
	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
	    reg, BCE_BC_STATE_DEBUG_CMD);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the TXP processor state.                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_txp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " TXP State "
	    "----------------------------\n");

	/*
	 * Read the 12 byte firmware version string and NUL terminate it
	 * before handing it to %s; the on-chip copy is not guaranteed to
	 * contain a terminator.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_TXP_SCRATCH + 0x10 + i * 4)));
	fw_version[3] = 0;
	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n",
	    val, BCE_TXP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
	    val, BCE_TXP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n",
	    val, BCE_TXP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
			/*
			 * Skip the big blank space between the TXP register
			 * bank and the TXP scratchpad.  The previous test
			 * (i < 0x454000 && i > 0x5ffff) could never select
			 * the low register bank.
			 */
			if (i < 0x45400 || i > 0x5ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the RXP processor state.                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_rxp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " RXP State "
	    "----------------------------\n");

	/*
	 * Read the 12 byte firmware version string and NUL terminate it
	 * before handing it to %s; the on-chip copy is not guaranteed to
	 * contain a terminator.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_RXP_SCRATCH + 0x10 + i * 4)));
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n",
	    val, BCE_RXP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
	    val, BCE_RXP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n",
	    val, BCE_RXP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
			/*
			 * Skip the big blank space between the RXP register
			 * bank and the RXP scratchpad.  The previous test
			 * (i < 0xc5400 && i > 0xdffff) was always false, so
			 * this dump never printed anything.
			 */
			if (i < 0xc5400 || i > 0xdffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the TPAT processor state.                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_tpat_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " TPAT State "
	    "----------------------------\n");

	/*
	 * Read the 12 byte firmware version string and NUL terminate it
	 * before handing it to %s; the on-chip copy is not guaranteed to
	 * contain a terminator.  (NOTE(review): the TPAT version lives at
	 * scratch offset 0x410 rather than 0x10 -- confirm intentional.)
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_TPAT_SCRATCH + 0x410 + i * 4)));
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n",
	    val, BCE_TPAT_CPU_MODE);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
	    val, BCE_TPAT_CPU_STATE);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n",
	    val, BCE_TPAT_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
			/*
			 * Skip the big blank space between the TPAT register
			 * bank and the TPAT scratchpad.  The previous test
			 * (i < 0x854000 && i > 0x9ffff) could never select
			 * the low register bank.
			 */
			if (i < 0x85400 || i > 0x9ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the Command Processor (CP) state.                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_cp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " CP State "
	    "----------------------------\n");

	/*
	 * Read the 12 byte firmware version string and NUL terminate it
	 * before handing it to %s; the on-chip copy is not guaranteed to
	 * contain a terminator.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_CP_SCRATCH + 0x10 + i * 4)));
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n",
	    val, BCE_CP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
	    val, BCE_CP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
	    BCE_CP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
			/*
			 * Skip the big blank space between the CP register
			 * bank and the CP scratchpad.  The previous test
			 * (i < 0x185400 && i > 0x19ffff) was always false,
			 * so this dump never printed anything.
			 */
			if (i < 0x185400 || i > 0x19ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the Completion Processor (COM) state.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_com_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " COM State "
	    "----------------------------\n");

	/*
	 * Read the 12 byte firmware version string and NUL terminate it
	 * before handing it to %s; previously fw_version[3] was left
	 * uninitialized, so %s could run past the buffer.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_COM_SCRATCH + 0x10 + i * 4)));
	fw_version[3] = 0;

	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n",
	    val, BCE_COM_CPU_MODE);

	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
	    val, BCE_COM_CPU_STATE);

	val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
	    BCE_COM_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
			BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
			    "0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the Receive Virtual 2 Physical (RV2P) state.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_rv2p_state(struct bce_softc *sc)
{
	u32 val, pc1, pc2, fw_ver_high, fw_ver_low;

	BCE_PRINTF(
	    "----------------------------"
	    " RV2P State "
	    "----------------------------\n");

	/* Stall both RV2P processors so their state can be read coherently. */
	val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2;
	REG_WR_IND(sc, BCE_RV2P_CONFIG, val);

	/*
	 * Read the firmware version: select instruction address 1 on each
	 * processor, then fetch the instruction word pair that encodes it.
	 */
	val = 0x00000001;
	REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	val = 0x00000001;
	REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	/* Resume the RV2P processors. */
	val = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2);
	REG_WR_IND(sc, BCE_RV2P_CONFIG, val);

	/*
	 * Fetch the program counter value.  The peek register packs both
	 * PCs into one word: PROC1 in the low half, PROC2 in the high half.
	 * NOTE(review): 0x68007800 presumably selects the PC debug vectors
	 * for both processors -- confirm against the RV2P debug mux docs.
	 */
	val = 0x68007800;
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
	val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2);

	/* Fetch the program counter value again to see if it is advancing. */
	val = 0x68007800;
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val);
	val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the driver state and then enters the debugger.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_breakpoint(struct bce_softc *sc)
{

	/*
	 * Unreachable code to silence compiler warnings
	 * about unused functions.
	 */
	if (0) {
		bce_freeze_controller(sc);
		bce_unfreeze_controller(sc);
		bce_dump_enet(sc, NULL);
		bce_dump_txbd(sc, 0, NULL);
		bce_dump_rxbd(sc, 0, NULL);
		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
		bce_dump_l2fhdr(sc, 0, NULL);
		bce_dump_ctx(sc, RX_CID);
		bce_dump_ftqs(sc);
		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD);
		bce_dump_status_block(sc);
		bce_dump_stats_block(sc);
		bce_dump_driver_state(sc);
		bce_dump_hw_state(sc);
		bce_dump_bc_state(sc);
		bce_dump_txp_state(sc, 0);
		bce_dump_rxp_state(sc, 0);
		bce_dump_tpat_state(sc, 0);
		bce_dump_cp_state(sc, 0);
		bce_dump_com_state(sc, 0);
		bce_dump_rv2p_state(sc);

#ifdef BCE_JUMBO_HDRSPLIT
		bce_dump_pgbd(sc, 0, NULL);
		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
#endif
	}

	/* Print a minimal state snapshot before entering the debugger. */
	bce_dump_status_block(sc);
	bce_dump_driver_state(sc);

	/* Call the debugger. */
	breakpoint();

	return;
}
#endif