/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);


int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

#define INT_MODE_INTx		1
#define INT_MODE_MSI		2
int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");


struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57800,
	BCM57800_MF,
	BCM57810,
	BCM57810_MF,
	BCM57840_O,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MFO,
	BCM57840_MF,
	BCM57811,
	BCM57811_MF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
/****************************************************************************
* General service functions
****************************************************************************/

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}
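
/*
 * Example (hypothetical address, for illustration): a dma_addr_t may be
 * wider than 32 bits while GRC-side registers are 32-bit, so the mapping
 * is split across two consecutive dwords. Writing 0x0000000123456780
 * through this helper is equivalent to:
 *
 *	REG_WR(bp, addr,     0x23456780);	(U64_LO of the mapping)
 *	REG_WR(bp, addr + 4, 0x00000001);	(U64_HI of the mapping)
 */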

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
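
/*
 * Usage sketch (illustrative): the two helpers above implement indirect
 * GRC access through the PCI config window - program the target address
 * into PCICFG_GRC_ADDRESS, move the data through PCICFG_GRC_DATA, then
 * park the window back on PCICFG_VENDOR_ID_OFFSET so a stray config
 * cycle cannot hit an arbitrary GRC location. A read-modify-write of a
 * GRC register before DMAE is ready could look like:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, reg);
 *	bnx2x_reg_wr_ind(bp, reg, val | bit);
 *
 * where "reg" and "bit" are placeholders; as the comment above notes,
 * this is only safe at init time while the MCP provides locking.
 */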

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"


/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
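
/*
 * Example (mirroring the driver's own use just below in
 * bnx2x_prep_dmae_with_comp()): composing an opcode for a PCI -> GRC
 * copy whose completion is written back to host memory:
 *
 *	u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 *
 * With with_comp == false the completion bits are simply left out and
 * the caller must track completion by other means.
 */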

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	/*
	 * Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}
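
/*
 * Timing note (derived from the constants above): on real silicon the
 * polling budget is 5us + 4000 iterations * 50us, i.e. roughly 200ms,
 * before DMAE_TIMEOUT is returned; on slow emulation the 400000 count
 * stretches this to about 20 seconds.
 */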

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
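
/*
 * Worked example (illustrative numbers): "len" counts dwords while
 * "offset" advances in bytes, hence the "* 4". Assuming a per-command
 * write limit of 0x400 dwords and len = 0x500:
 *
 *	pass 1:     write 0x400 dwords, offset += 0x1000 bytes
 *	final call: write the remaining 0x100 dwords
 */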

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
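
/*
 * Note on the loop exit conditions above: the "freed" counters are
 * free-running and may wrap, so progress is measured with SUB_S32(), a
 * signed 32-bit subtraction, rather than a direct "freed >= target"
 * compare. For example freed_start = 0xfffffff0 and freed = 0x00000010
 * still yields a progress of 0x20.
 */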

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
					   char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);


	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)


static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
				  u32 poll_cnt)
{
	struct sdm_op_gen op_gen = {0};

	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		ret = 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines
 */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{

	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;


	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
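
/*
 * Worked example of the edge-latch value above (hypothetical VN): in MF
 * mode with BP_VN(bp) == 2 on a non-PMF function,
 *
 *	val = 0xee0f | (1 << (2 + 4)) = 0xee4f
 *
 * i.e. the base mask plus this function's own VN bit; a PMF additionally
 * ORs in 0x1100 to enable the NIG and GPIO3 attentions.
 */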

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent HC from sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}
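
/*
 * Locking protocol note (inferred from the register usage above, not a
 * documented API): writing the resource bit to (reg + 4) requests a set,
 * and reading "reg" back shows which bits this function actually owns -
 * if another function holds the resource, the read-back bit stays clear.
 * A successful trylock is expected to be paired with
 * bnx2x_release_hw_lock(), e.g. (sketch):
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... critical section ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */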

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp: driver handle
 *
 * Tries to acquire a leader lock for the current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit.
		 * prevent case that both bits are cleared.
		 * At the end of load/unload driver checks that
		 * sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule workqueue to send ack to MCP */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}

	return;
}

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
{
	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;

	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
				 start);
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_PRESENT);
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		if (likely(bp->state == BNX2X_STATE_OPEN)) {
			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops)
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();
		}

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
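
/*
 * Status word layout assumed by the handler above: bit 0x1 signals the
 * slowpath (default SB) and each fastpath SB owns one of the following
 * bits via mask = 0x2 << (fp->index + CNIC_PRESENT). For example, with
 * CNIC compiled in (CNIC_PRESENT == 1 under that assumption), eth queue
 * 0 maps to 0x4, queue 1 to 0x8, and the CNIC SB itself is serviced
 * under mask 0x2.
 */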

/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
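
/*
 * Shift arithmetic example (assuming MISC_REGISTERS_GPIO_PORT_SHIFT is
 * the per-port stride of 4): reading GPIO 2 of port 1 on a board with
 * no active swap uses gpio_shift = 2 + 4 = 6, i.e. gpio_mask = 0x40 in
 * MISC_REG_GPIO.
 */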
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 1863 u32 gpio_mask = (1 << gpio_shift); 1864 u32 gpio_reg; 1865 int value; 1866 1867 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1868 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 1869 return -EINVAL; 1870 } 1871 1872 /* read GPIO value */ 1873 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 1874 1875 /* get the requested pin value */ 1876 if ((gpio_reg & gpio_mask) == gpio_mask) 1877 value = 1; 1878 else 1879 value = 0; 1880 1881 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); 1882 1883 return value; 1884 } 1885 1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 1887 { 1888 /* The GPIO should be swapped if swap register is set and active */ 1889 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 1890 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 1891 int gpio_shift = gpio_num + 1892 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 1893 u32 gpio_mask = (1 << gpio_shift); 1894 u32 gpio_reg; 1895 1896 if (gpio_num > MISC_REGISTERS_GPIO_3) { 1897 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 1898 return -EINVAL; 1899 } 1900 1901 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1902 /* read GPIO and mask except the float bits */ 1903 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 1904 1905 switch (mode) { 1906 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1907 DP(NETIF_MSG_LINK, 1908 "Set GPIO %d (shift %d) -> output low\n", 1909 gpio_num, gpio_shift); 1910 /* clear FLOAT and set CLR */ 1911 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1912 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 1913 break; 1914 1915 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 1916 DP(NETIF_MSG_LINK, 1917 "Set GPIO %d (shift %d) -> output high\n", 1918 gpio_num, gpio_shift); 1919 /* clear FLOAT and set SET */ 1920 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1921 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 1922 break; 1923 1924 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 1925 DP(NETIF_MSG_LINK, 1926 "Set GPIO %d (shift %d) -> input\n", 1927 gpio_num, gpio_shift); 1928 /* set FLOAT */ 1929 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 1930 break; 1931 1932 default: 1933 break; 1934 } 1935 1936 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 1937 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1938 1939 return 0; 1940 } 1941 1942 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) 1943 { 1944 u32 gpio_reg = 0; 1945 int rc = 0; 1946 1947 /* Any port swapping should be handled by caller. 
*/ 1948 1949 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1950 /* read GPIO and mask except the float bits */ 1951 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 1952 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 1953 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 1954 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 1955 1956 switch (mode) { 1957 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 1958 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); 1959 /* set CLR */ 1960 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 1961 break; 1962 1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 1964 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); 1965 /* set SET */ 1966 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 1967 break; 1968 1969 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 1970 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); 1971 /* set FLOAT */ 1972 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 1973 break; 1974 1975 default: 1976 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); 1977 rc = -EINVAL; 1978 break; 1979 } 1980 1981 if (rc == 0) 1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 1983 1984 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1985 1986 return rc; 1987 } 1988 1989 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 1990 { 1991 /* The GPIO should be swapped if swap register is set and active */ 1992 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 1993 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 1994 int gpio_shift = gpio_num + 1995 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 1996 u32 gpio_mask = (1 << gpio_shift); 1997 u32 gpio_reg; 1998 1999 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2000 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2001 return -EINVAL; 2002 } 2003 2004 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2005 /* read GPIO int */ 2006 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); 2007 2008 switch (mode) { 2009 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2010 DP(NETIF_MSG_LINK, 2011 "Clear GPIO INT %d (shift %d) -> output low\n", 2012 gpio_num, gpio_shift); 2013 /* clear SET and set CLR */ 2014 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2015 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2016 break; 2017 2018 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2019 DP(NETIF_MSG_LINK, 2020 "Set GPIO INT %d (shift %d) -> output high\n", 2021 gpio_num, gpio_shift); 2022 /* clear CLR and set SET */ 2023 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2024 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2025 break; 2026 2027 default: 2028 break; 2029 } 2030 2031 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); 2032 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2033 2034 return 0; 2035 } 2036 2037 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) 2038 { 2039 u32 spio_mask = (1 << spio_num); 2040 u32 spio_reg; 2041 2042 if ((spio_num < MISC_REGISTERS_SPIO_4) || 2043 (spio_num > MISC_REGISTERS_SPIO_7)) { 2044 BNX2X_ERR("Invalid SPIO %d\n", spio_num); 2045 return -EINVAL; 2046 } 2047 2048 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2049 /* read SPIO and mask except the float bits */ 2050 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); 2051 2052 switch (mode) { 2053 case MISC_REGISTERS_SPIO_OUTPUT_LOW: 2054 DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num); 2055 /* clear FLOAT and set CLR */ 2056 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2057 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); 2058 break; 2059 2060 
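	/* Illustrative usage sketch, added in editing (not in the original
	 * source): a caller would drive SPIO 5 low and later float it again
	 * with
	 *
	 *	bnx2x_set_spio(bp, 5, MISC_REGISTERS_SPIO_OUTPUT_LOW);
	 *	bnx2x_set_spio(bp, 5, MISC_REGISTERS_SPIO_INPUT_HI_Z);
	 *
	 * each call performing a locked read-modify-write of MISC_REG_SPIO
	 * under HW_LOCK_RESOURCE_SPIO.
	 */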
case MISC_REGISTERS_SPIO_OUTPUT_HIGH: 2061 DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num); 2062 /* clear FLOAT and set SET */ 2063 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2064 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); 2065 break; 2066 2067 case MISC_REGISTERS_SPIO_INPUT_HI_Z: 2068 DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num); 2069 /* set FLOAT */ 2070 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2071 break; 2072 2073 default: 2074 break; 2075 } 2076 2077 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2078 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2079 2080 return 0; 2081 } 2082 2083 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2084 { 2085 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2086 switch (bp->link_vars.ieee_fc & 2087 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2088 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2089 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2090 ADVERTISED_Pause); 2091 break; 2092 2093 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2094 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2095 ADVERTISED_Pause); 2096 break; 2097 2098 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2099 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2100 break; 2101 2102 default: 2103 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2104 ADVERTISED_Pause); 2105 break; 2106 } 2107 } 2108 2109 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2110 { 2111 if (!BP_NOMCP(bp)) { 2112 u8 rc; 2113 int cfx_idx = bnx2x_get_link_cfg_idx(bp); 2114 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2115 /* 2116 * Initialize link parameters structure variables 2117 * It is recommended to turn off RX FC for jumbo frames 2118 * for better performance 2119 */ 2120 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2121 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2122 else 2123 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2124 2125 bnx2x_acquire_phy_lock(bp); 2126 2127 if (load_mode == LOAD_DIAG) { 2128 struct link_params *lp = &bp->link_params; 2129 lp->loopback_mode = LOOPBACK_XGXS; 2130 /* do PHY loopback at 10G speed, if possible */ 2131 if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2132 if (lp->speed_cap_mask[cfx_idx] & 2133 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2134 lp->req_line_speed[cfx_idx] = 2135 SPEED_10000; 2136 else 2137 lp->req_line_speed[cfx_idx] = 2138 SPEED_1000; 2139 } 2140 } 2141 2142 if (load_mode == LOAD_LOOPBACK_EXT) { 2143 struct link_params *lp = &bp->link_params; 2144 lp->loopback_mode = LOOPBACK_EXT; 2145 } 2146 2147 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2148 2149 bnx2x_release_phy_lock(bp); 2150 2151 bnx2x_calc_fc_adv(bp); 2152 2153 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { 2154 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2155 bnx2x_link_report(bp); 2156 } else 2157 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2158 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2159 return rc; 2160 } 2161 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2162 return -EINVAL; 2163 } 2164 2165 void bnx2x_link_set(struct bnx2x *bp) 2166 { 2167 if (!BP_NOMCP(bp)) { 2168 bnx2x_acquire_phy_lock(bp); 2169 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2170 bnx2x_release_phy_lock(bp); 2171 2172 bnx2x_calc_fc_adv(bp); 2173 } else 2174 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2175 } 2176 2177 static void bnx2x__link_reset(struct bnx2x *bp) 2178 { 2179 if (!BP_NOMCP(bp)) { 2180 
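		/* Hedged note, added in editing: the pattern below mirrors
		 * the other link helpers in this file - every
		 * link_params/link_vars manipulation is bracketed by the PHY
		 * lock so MDIO access from both functions stays serialized:
		 *
		 *	bnx2x_acquire_phy_lock(bp);
		 *	... bnx2x_phy_init() / bnx2x_lfa_reset() ...
		 *	bnx2x_release_phy_lock(bp);
		 */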
		bnx2x_acquire_phy_lock(bp);
		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}


/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
			      struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_SI(bp)) {
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}


static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ?
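			/* Illustrative worked example, added in editing: in
			 * 4-port mode n = 2, so for vn = 1, port = 1, path = 0
			 * the loop below computes
			 * abs_func = 2 * (2*1 + 1) + 0 = 6, matching the
			 * 4-port formula in the comment that follows. */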
					2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	} else {
		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
		bp->flags &= ~MF_FUNC_DIS;
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	struct cmng_init_input input;
	memset(&input, 0, sizeof(struct cmng_init_input));

	input.port_rate = bp->link_vars.line_speed;

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_min(bp, &input);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
				bnx2x_calc_vn_max(bp, vn, &input);

		/* always enable rate shaping and fairness */
		input.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

		bnx2x_init_cmng(&input, &bp->cmng);
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static void storm_memset_cmng(struct bnx2x *bp,
			      struct cmng_init *cmng,
			      u8 port)
{
	int vn;
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = func_by_vn(bp, vn);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct rate_shaping_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct fairness_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type !=
		    MAC_TYPE_EMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	/* read updated dcb configuration */
	bnx2x_dcbx_pmf_update(bp);

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if ramrod can not be sent, respond to MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = cpu_to_le16(vif_index);
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ?
		DRV_MSG_CODE_AFEX_LISTGET_ACK :
		DRV_MSG_CODE_AFEX_LISTSET_ACK;

	/* if ramrod can not be sent, respond to MCP immediately for
	 * SET and GET requests (others are not triggered from MCP)
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc < 0)
		bnx2x_fw_command(bp, drv_msg_code, 0);

	return 0;
}

static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
{
	struct afex_stats afex_stats;
	u32 func = BP_ABS_FUNC(bp);
	u32 mf_config;
	u16 vlan_val;
	u32 vlan_prio;
	u16 vif_id;
	u8 allowed_prio;
	u8 vlan_mode;
	u32 addr_to_write, vifid, addrs, stats_type, i;

	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
	}

	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
		   vifid, addrs);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
					       addrs);
	}

	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
		addr_to_write = SHMEM2_RD(bp,
			afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
		stats_type = SHMEM2_RD(bp,
			afex_param1_to_driver[BP_FW_MB_IDX(bp)]);

		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
		   addr_to_write);

		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);

		/* write response to scratchpad, for MCP */
		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
			REG_WR(bp, addr_to_write + i*sizeof(u32),
			       *(((u32 *)(&afex_stats))+i));

		/* send ack message to MCP */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
	}

	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
		bp->mf_config[BP_VN(bp)] = mf_config;
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
		   mf_config);

		/* if VIF_SET is "enabled" */
		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
			struct cmng_init_input cmng_input;
			struct rate_shaping_vars_per_vn m_rs_vn;
			size_t size = sizeof(struct rate_shaping_vars_per_vn);
			u32 addr = BAR_XSTRORM_INTMEM +
			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));

			bp->mf_config[BP_VN(bp)] = mf_config;

			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
			m_rs_vn.vn_counter.rate =
				cmng_input.vnic_max_rate[BP_VN(bp)];
			m_rs_vn.vn_counter.quota =
				(m_rs_vn.vn_counter.rate *
				 RS_PERIODIC_TIMEOUT_USEC) / 8;

			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);

			/* read relevant values from mf_cfg struct in shmem */
			vif_id =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
			vlan_val =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
			vlan_prio = (mf_config &
				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
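			/* Worked example, added in editing (illustrative):
			 * VID 100 (0x064) with vlan_prio 5 gives
			 * 0x064 | (5 << 13) = 0xA064 - the 802.1Q TCI the FW
			 * uses as the default afex VLAN tag below. */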
			vlan_mode =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
			allowed_prio =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;

			/* send ramrod to FW, return in case of failure */
			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
						   allowed_prio))
				return;

			bp->afex_def_vlan_tag = vlan_val;
			bp->afex_vlan_mode = vlan_mode;
		} else {
			/* notify link down because BP->flags is disabled */
			bnx2x_link_report(bp);

			/* send INVALID VIF ramrod to FW */
			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);

			/* Reset the default afex VLAN */
			bp->afex_def_vlan_tag = -1;
		}
	}
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
	smp_mb();

	/* queue a periodic task */
	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);

	bnx2x_dcbx_pmf_update(bp);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
	   (command | seq), param);

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG!
		 */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}


static void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
2785 */ 2786 2787 __set_bit(BNX2X_Q_FLG_STATS, &flags); 2788 if (zero_stats) 2789 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 2790 2791 2792 return flags; 2793 } 2794 2795 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 2796 struct bnx2x_fastpath *fp, 2797 bool leading) 2798 { 2799 unsigned long flags = 0; 2800 2801 /* calculate other queue flags */ 2802 if (IS_MF_SD(bp)) 2803 __set_bit(BNX2X_Q_FLG_OV, &flags); 2804 2805 if (IS_FCOE_FP(fp)) { 2806 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 2807 /* For FCoE - force usage of default priority (for afex) */ 2808 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 2809 } 2810 2811 if (!fp->disable_tpa) { 2812 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2813 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 2814 if (fp->mode == TPA_MODE_GRO) 2815 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); 2816 } 2817 2818 if (leading) { 2819 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 2820 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 2821 } 2822 2823 /* Always set HW VLAN stripping */ 2824 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 2825 2826 /* configure silent vlan removal */ 2827 if (IS_MF_AFEX(bp)) 2828 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 2829 2830 2831 return flags | bnx2x_get_common_flags(bp, fp, true); 2832 } 2833 2834 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 2835 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, 2836 u8 cos) 2837 { 2838 gen_init->stat_id = bnx2x_stats_id(fp); 2839 gen_init->spcl_id = fp->cl_id; 2840 2841 /* Always use mini-jumbo MTU for FCoE L2 ring */ 2842 if (IS_FCOE_FP(fp)) 2843 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 2844 else 2845 gen_init->mtu = bp->dev->mtu; 2846 2847 gen_init->cos = cos; 2848 } 2849 2850 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 2851 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, 2852 struct bnx2x_rxq_setup_params *rxq_init) 2853 { 2854 u8 max_sge = 0; 2855 u16 sge_sz = 0; 2856 u16 tpa_agg_size = 0; 2857 2858 if (!fp->disable_tpa) { 2859 pause->sge_th_lo = SGE_TH_LO(bp); 2860 pause->sge_th_hi = SGE_TH_HI(bp); 2861 2862 /* validate SGE ring has enough to cross high threshold */ 2863 WARN_ON(bp->dropless_fc && 2864 pause->sge_th_hi + FW_PREFETCH_CNT > 2865 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 2866 2867 tpa_agg_size = min_t(u32, 2868 (min_t(u32, 8, MAX_SKB_FRAGS) * 2869 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 2870 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> 2871 SGE_PAGE_SHIFT; 2872 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 2873 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; 2874 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, 2875 0xffff); 2876 } 2877 2878 /* pause - not for e1 */ 2879 if (!CHIP_IS_E1(bp)) { 2880 pause->bd_th_lo = BD_TH_LO(bp); 2881 pause->bd_th_hi = BD_TH_HI(bp); 2882 2883 pause->rcq_th_lo = RCQ_TH_LO(bp); 2884 pause->rcq_th_hi = RCQ_TH_HI(bp); 2885 /* 2886 * validate that rings have enough entries to cross 2887 * high thresholds 2888 */ 2889 WARN_ON(bp->dropless_fc && 2890 pause->bd_th_hi + FW_PREFETCH_CNT > 2891 bp->rx_ring_size); 2892 WARN_ON(bp->dropless_fc && 2893 pause->rcq_th_hi + FW_PREFETCH_CNT > 2894 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 2895 2896 pause->pri_map = 1; 2897 } 2898 2899 /* rxq setup */ 2900 rxq_init->dscr_map = fp->rx_desc_mapping; 2901 rxq_init->sge_map = fp->rx_sge_mapping; 2902 rxq_init->rcq_map = fp->rx_comp_mapping; 2903 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 2904 2905 /* This should be a maximum number of data bytes that may be 2906 * placed on the BD (not including paddings). 
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
		BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number of simultaneous TPA aggregations for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration in a function level.
	 */
	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link so initial link rate is set to 10 Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
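	 * (Added illustration, assuming percent-based (SI) mode: a vNIC
	 * whose max-BW is configured to 40% is initially capped by
	 * bnx2x_calc_vn_max() at 10000 * 40 / 100 = 4000 Mb/s until the
	 * real link speed is known.)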
3006 */ 3007 bp->link_vars.line_speed = SPEED_10000; 3008 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); 3009 3010 /* Only the PMF sets the HW */ 3011 if (bp->port.pmf) 3012 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3013 3014 /* init Event Queue */ 3015 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); 3016 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); 3017 eq_data.producer = bp->eq_prod; 3018 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 3019 eq_data.sb_id = DEF_SB_ID; 3020 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); 3021 } 3022 3023 3024 static void bnx2x_e1h_disable(struct bnx2x *bp) 3025 { 3026 int port = BP_PORT(bp); 3027 3028 bnx2x_tx_disable(bp); 3029 3030 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 3031 } 3032 3033 static void bnx2x_e1h_enable(struct bnx2x *bp) 3034 { 3035 int port = BP_PORT(bp); 3036 3037 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 3038 3039 /* Tx queue should be only reenabled */ 3040 netif_tx_wake_all_queues(bp->dev); 3041 3042 /* 3043 * Should not call netif_carrier_on since it will be called if the link 3044 * is up when checking for link state 3045 */ 3046 } 3047 3048 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3049 3050 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) 3051 { 3052 struct eth_stats_info *ether_stat = 3053 &bp->slowpath->drv_info_to_mcp.ether_stat; 3054 3055 strlcpy(ether_stat->version, DRV_MODULE_VERSION, 3056 ETH_STAT_INFO_VERSION_LEN); 3057 3058 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3059 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3060 ether_stat->mac_local); 3061 3062 ether_stat->mtu_size = bp->dev->mtu; 3063 3064 if (bp->dev->features & NETIF_F_RXCSUM) 3065 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3066 if (bp->dev->features & NETIF_F_TSO) 3067 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 3068 ether_stat->feature_flags |= bp->common.boot_mode; 3069 3070 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; 3071 3072 ether_stat->txq_size = bp->tx_ring_size; 3073 ether_stat->rxq_size = bp->rx_ring_size; 3074 } 3075 3076 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3077 { 3078 #ifdef BCM_CNIC 3079 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3080 struct fcoe_stats_info *fcoe_stat = 3081 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3082 3083 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, 3084 bp->fip_mac, ETH_ALEN); 3085 3086 fcoe_stat->qos_priority = 3087 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3088 3089 /* insert FCoE stats from ramrod response */ 3090 if (!NO_FCOE(bp)) { 3091 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3092 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3093 tstorm_queue_statistics; 3094 3095 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3096 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 
				xstorm_queue_statistics;

		struct fcoe_statistics_params *fw_fcoe_stat =
			&bp->fw_stats_data->fcoe;

		ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
		       fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
		       fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->mcast_pkts_sent);
	}

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
#endif
}

static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct iscsi_stats_info *iscsi_stat =
		&bp->slowpath->drv_info_to_mcp.iscsi_stat;

	memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
	       bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);

	iscsi_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
#endif
}

/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify other functions about the change
 */
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}

static void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

static
void bnx2x_handle_eee_event(struct bnx2x *bp) 3207 { 3208 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3209 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3210 } 3211 3212 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3213 { 3214 enum drv_info_opcode op_code; 3215 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3216 3217 /* if drv_info version supported by MFW doesn't match - send NACK */ 3218 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3219 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3220 return; 3221 } 3222 3223 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3224 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3225 3226 memset(&bp->slowpath->drv_info_to_mcp, 0, 3227 sizeof(union drv_info_to_mcp)); 3228 3229 switch (op_code) { 3230 case ETH_STATS_OPCODE: 3231 bnx2x_drv_info_ether_stat(bp); 3232 break; 3233 case FCOE_STATS_OPCODE: 3234 bnx2x_drv_info_fcoe_stat(bp); 3235 break; 3236 case ISCSI_STATS_OPCODE: 3237 bnx2x_drv_info_iscsi_stat(bp); 3238 break; 3239 default: 3240 /* if op code isn't supported - send NACK */ 3241 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3242 return; 3243 } 3244 3245 /* if we got drv_info attn from MFW then these fields are defined in 3246 * shmem2 for sure 3247 */ 3248 SHMEM2_WR(bp, drv_info_host_addr_lo, 3249 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3250 SHMEM2_WR(bp, drv_info_host_addr_hi, 3251 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3252 3253 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3254 } 3255 3256 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3257 { 3258 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 3259 3260 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3261 3262 /* 3263 * This is the only place besides the function initialization 3264 * where the bp->flags can change so it is done without any 3265 * locks 3266 */ 3267 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3268 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3269 bp->flags |= MF_FUNC_DIS; 3270 3271 bnx2x_e1h_disable(bp); 3272 } else { 3273 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3274 bp->flags &= ~MF_FUNC_DIS; 3275 3276 bnx2x_e1h_enable(bp); 3277 } 3278 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3279 } 3280 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3281 bnx2x_config_mf_bw(bp); 3282 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3283 } 3284 3285 /* Report results to MCP */ 3286 if (dcc_event) 3287 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); 3288 else 3289 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); 3290 } 3291 3292 /* must be called under the spq lock */ 3293 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3294 { 3295 struct eth_spe *next_spe = bp->spq_prod_bd; 3296 3297 if (bp->spq_prod_bd == bp->spq_last_bd) { 3298 bp->spq_prod_bd = bp->spq; 3299 bp->spq_prod_idx = 0; 3300 DP(BNX2X_MSG_SP, "end of spq\n"); 3301 } else { 3302 bp->spq_prod_bd++; 3303 bp->spq_prod_idx++; 3304 } 3305 return next_spe; 3306 } 3307 3308 /* must be called under the spq lock */ 3309 static void bnx2x_sp_prod_update(struct bnx2x *bp) 3310 { 3311 int func = BP_FUNC(bp); 3312 3313 /* 3314 * Make sure that BD data is updated before writing the producer: 3315 * BD data is written to the memory, the producer is read from the 3316 * memory, thus we need a full memory barrier to ensure the ordering. 
	 */
	mb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	if ((cmd_type == NONE_CONNECTION_TYPE) ||
	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
		return true;
	else
		return false;

}


/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int cmd_type)
{
	struct eth_spe *spe;
	u16 type;
	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post SP when there is panic\n");
		return -EIO;
	}
#endif

	spin_lock_bh(&bp->spq_lock);

	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the spin_lock and spin_unlock. Thus no
	 * more explicit memory barrier is needed.
	 */
	if (common)
		atomic_dec(&bp->eq_spq_left);
	else
		atomic_dec(&bp->cq_spq_left);


	DP(BNX2X_MSG_SP,
	   "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}

#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);

			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
			if (nig_mask) {
				REG_WR(bp, nig_int_mask_addr, 0);

				bnx2x_link_attn(bp);
			}

			/* handle unicore attn?
			 */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
			    "Please contact OEM Support for assistance\n");

	/*
	 * Schedule device reset (unload): some boards consume enough power
	 * when the driver is up to overheat if the fan fails.
	 */
	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&bp->sp_rtnl_task, 0);

}

static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 3634 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 3635 3636 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 3637 3638 val = REG_RD(bp, reg_offset); 3639 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 3640 REG_WR(bp, reg_offset, val); 3641 3642 BNX2X_ERR("SPIO5 hw attention\n"); 3643 3644 /* Fan failure attention */ 3645 bnx2x_hw_reset_phy(&bp->link_params); 3646 bnx2x_fan_failure(bp); 3647 } 3648 3649 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 3650 bnx2x_acquire_phy_lock(bp); 3651 bnx2x_handle_module_detect_int(&bp->link_params); 3652 bnx2x_release_phy_lock(bp); 3653 } 3654 3655 if (attn & HW_INTERRUT_ASSERT_SET_0) { 3656 3657 val = REG_RD(bp, reg_offset); 3658 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 3659 REG_WR(bp, reg_offset, val); 3660 3661 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 3662 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 3663 bnx2x_panic(); 3664 } 3665 } 3666 3667 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3668 { 3669 u32 val; 3670 3671 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 3672 3673 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 3674 BNX2X_ERR("DB hw attention 0x%x\n", val); 3675 /* DORQ discard attention */ 3676 if (val & 0x2) 3677 BNX2X_ERR("FATAL error from DORQ\n"); 3678 } 3679 3680 if (attn & HW_INTERRUT_ASSERT_SET_1) { 3681 3682 int port = BP_PORT(bp); 3683 int reg_offset; 3684 3685 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 3686 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 3687 3688 val = REG_RD(bp, reg_offset); 3689 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 3690 REG_WR(bp, reg_offset, val); 3691 3692 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 3693 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 3694 bnx2x_panic(); 3695 } 3696 } 3697 3698 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3699 { 3700 u32 val; 3701 3702 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3703 3704 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 3705 BNX2X_ERR("CFC hw attention 0x%x\n", val); 3706 /* CFC error attention */ 3707 if (val & 0x2) 3708 BNX2X_ERR("FATAL error from CFC\n"); 3709 } 3710 3711 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3712 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 3713 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 3714 /* RQ_USDMDP_FIFO_OVERFLOW */ 3715 if (val & 0x18000) 3716 BNX2X_ERR("FATAL error from PXP\n"); 3717 3718 if (!CHIP_IS_E1x(bp)) { 3719 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 3720 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 3721 } 3722 } 3723 3724 if (attn & HW_INTERRUT_ASSERT_SET_2) { 3725 3726 int port = BP_PORT(bp); 3727 int reg_offset; 3728 3729 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 3730 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 3731 3732 val = REG_RD(bp, reg_offset); 3733 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 3734 REG_WR(bp, reg_offset, val); 3735 3736 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 3737 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 3738 bnx2x_panic(); 3739 } 3740 } 3741 3742 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 3743 { 3744 u32 val; 3745 3746 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 3747 3748 if (attn & BNX2X_PMF_LINK_ASSERT) { 3749 int func = BP_FUNC(bp); 3750 3751 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3752 bnx2x_read_mf_cfg(bp); 3753 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 3754 func_mf_config[BP_ABS_FUNC(bp)].config); 3755 val = SHMEM_RD(bp, 3756 func_mb[BP_FW_MB_IDX(bp)].drv_status); 3757 if (val & DRV_STATUS_DCC_EVENT_MASK) 3758 bnx2x_dcc_event(bp, 3759 (val & DRV_STATUS_DCC_EVENT_MASK)); 3760 3761 if (val & DRV_STATUS_SET_MF_BW) 3762 bnx2x_set_mf_bw(bp); 3763 3764 if (val & DRV_STATUS_DRV_INFO_REQ) 3765 bnx2x_handle_drv_info_req(bp); 3766 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3767 bnx2x_pmf_update(bp); 3768 3769 if (bp->port.pmf && 3770 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 3771 bp->dcbx_enabled > 0) 3772 /* start dcbx state machine */ 3773 bnx2x_dcbx_set_params(bp, 3774 BNX2X_DCBX_STATE_NEG_RECEIVED); 3775 if (val & DRV_STATUS_AFEX_EVENT_MASK) 3776 bnx2x_handle_afex_cmd(bp, 3777 val & DRV_STATUS_AFEX_EVENT_MASK); 3778 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 3779 bnx2x_handle_eee_event(bp); 3780 if (bp->link_vars.periodic_flags & 3781 PERIODIC_FLAGS_LINK_EVENT) { 3782 /* sync with link */ 3783 bnx2x_acquire_phy_lock(bp); 3784 bp->link_vars.periodic_flags &= 3785 ~PERIODIC_FLAGS_LINK_EVENT; 3786 bnx2x_release_phy_lock(bp); 3787 if (IS_MF(bp)) 3788 bnx2x_link_sync_notify(bp); 3789 bnx2x_link_report(bp); 3790 } 3791 /* Always call it here: bnx2x_link_report() will 3792 * prevent the link indication duplication. 3793 */ 3794 bnx2x__link_status_update(bp); 3795 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3796 3797 BNX2X_ERR("MC assert!\n"); 3798 bnx2x_mc_assert(bp); 3799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 3800 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 3801 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 3802 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 3803 bnx2x_panic(); 3804 3805 } else if (attn & BNX2X_MCP_ASSERT) { 3806 3807 BNX2X_ERR("MCP assert!\n"); 3808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 3809 bnx2x_fw_dump(bp); 3810 3811 } else 3812 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 3813 } 3814 3815 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3816 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 3817 if (attn & BNX2X_GRC_TIMEOUT) { 3818 val = CHIP_IS_E1(bp) ? 0 : 3819 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 3820 BNX2X_ERR("GRC time-out 0x%08x\n", val); 3821 } 3822 if (attn & BNX2X_GRC_RSV) { 3823 val = CHIP_IS_E1(bp) ? 0 : 3824 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 3825 BNX2X_ERR("GRC reserved 0x%08x\n", val); 3826 } 3827 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3828 } 3829 } 3830 3831 /* 3832 * Bits map: 3833 * 0-7 - Engine0 load counter. 3834 * 8-15 - Engine1 load counter. 3835 * 16 - Engine0 RESET_IN_PROGRESS bit. 3836 * 17 - Engine1 RESET_IN_PROGRESS bit. 3837 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function 3838 * on the engine 3839 * 19 - Engine1 ONE_IS_LOADED. 3840 * 20 - Chip reset flow bit. 
When set, a non-leader must wait for the leader on both
3841 * engines to complete (i.e. check both RESET_IN_PROGRESS bits, not
3842 * just the one belonging to its engine).
3843 *
3844 */
3845 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3846
3847 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3848 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3849 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3850 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3851 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3852 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3853 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
3854
3855 /*
3856 * Set the GLOBAL_RESET bit.
3857 *
3858 * Should be run under rtnl lock
3859 */
3860 void bnx2x_set_reset_global(struct bnx2x *bp)
3861 {
3862 u32 val;
3863 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3864 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3865 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
3866 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3867 }
3868
3869 /*
3870 * Clear the GLOBAL_RESET bit.
3871 *
3872 * Should be run under rtnl lock
3873 */
3874 static void bnx2x_clear_reset_global(struct bnx2x *bp)
3875 {
3876 u32 val;
3877 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3878 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3879 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
3880 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3881 }
3882
3883 /*
3884 * Checks the GLOBAL_RESET bit.
3885 *
3886 * should be run under rtnl lock
3887 */
3888 static bool bnx2x_reset_is_global(struct bnx2x *bp)
3889 {
3890 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3891
3892 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3893 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
3894 }
3895
3896 /*
3897 * Clear RESET_IN_PROGRESS bit for the current engine.
3898 *
3899 * Should be run under rtnl lock
3900 */
3901 static void bnx2x_set_reset_done(struct bnx2x *bp)
3902 {
3903 u32 val;
3904 u32 bit = BP_PATH(bp) ?
3905 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3906 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3907 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3908
3909 /* Clear the bit */
3910 val &= ~bit;
3911 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3912
3913 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3914 }
3915
3916 /*
3917 * Set RESET_IN_PROGRESS for the current engine.
3918 *
3919 * should be run under rtnl lock
3920 */
3921 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3922 {
3923 u32 val;
3924 u32 bit = BP_PATH(bp) ?
3925 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3926 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3927 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3928
3929 /* Set the bit */
3930 val |= bit;
3931 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3932 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3933 }
3934
3935 /*
3936 * Checks the RESET_IN_PROGRESS bit for the given engine.
3937 * should be run under rtnl lock
3938 */
3939 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
3940 {
3941 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3942 u32 bit = engine ?
3943 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3944
3945 /* return false if bit is set */
3946 return (val & bit) ? false : true;
3947 }
3948
3949 /*
3950 * set pf load for the current pf.
3951 *
3952 * should be run under rtnl lock
3953 */
3954 void bnx2x_set_pf_load(struct bnx2x *bp)
3955 {
3956 u32 val1, val;
3957 u32 mask = BP_PATH(bp) ?
BNX2X_PATH1_LOAD_CNT_MASK : 3958 BNX2X_PATH0_LOAD_CNT_MASK; 3959 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3960 BNX2X_PATH0_LOAD_CNT_SHIFT; 3961 3962 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3963 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3964 3965 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 3966 3967 /* get the current counter value */ 3968 val1 = (val & mask) >> shift; 3969 3970 /* set bit of that PF */ 3971 val1 |= (1 << bp->pf_num); 3972 3973 /* clear the old value */ 3974 val &= ~mask; 3975 3976 /* set the new one */ 3977 val |= ((val1 << shift) & mask); 3978 3979 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3980 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3981 } 3982 3983 /** 3984 * bnx2x_clear_pf_load - clear pf load mark 3985 * 3986 * @bp: driver handle 3987 * 3988 * Should be run under rtnl lock. 3989 * Decrements the load counter for the current engine. Returns 3990 * whether other functions are still loaded 3991 */ 3992 bool bnx2x_clear_pf_load(struct bnx2x *bp) 3993 { 3994 u32 val1, val; 3995 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 3996 BNX2X_PATH0_LOAD_CNT_MASK; 3997 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3998 BNX2X_PATH0_LOAD_CNT_SHIFT; 3999 4000 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4001 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4002 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 4003 4004 /* get the current counter value */ 4005 val1 = (val & mask) >> shift; 4006 4007 /* clear bit of that PF */ 4008 val1 &= ~(1 << bp->pf_num); 4009 4010 /* clear the old value */ 4011 val &= ~mask; 4012 4013 /* set the new one */ 4014 val |= ((val1 << shift) & mask); 4015 4016 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4017 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4018 return val1 != 0; 4019 } 4020 4021 /* 4022 * Read the load status for the current engine. 4023 * 4024 * should be run under rtnl lock 4025 */ 4026 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4027 { 4028 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 4029 BNX2X_PATH0_LOAD_CNT_MASK); 4030 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4031 BNX2X_PATH0_LOAD_CNT_SHIFT); 4032 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4033 4034 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4035 4036 val = (val & mask) >> shift; 4037 4038 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4039 engine, val); 4040 4041 return val != 0; 4042 } 4043 4044 static void _print_next_block(int idx, const char *blk) 4045 { 4046 pr_cont("%s%s", idx ? 
", " : "", blk); 4047 } 4048 4049 static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, 4050 bool print) 4051 { 4052 int i = 0; 4053 u32 cur_bit = 0; 4054 for (i = 0; sig; i++) { 4055 cur_bit = ((u32)0x1 << i); 4056 if (sig & cur_bit) { 4057 switch (cur_bit) { 4058 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4059 if (print) 4060 _print_next_block(par_num++, "BRB"); 4061 break; 4062 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4063 if (print) 4064 _print_next_block(par_num++, "PARSER"); 4065 break; 4066 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4067 if (print) 4068 _print_next_block(par_num++, "TSDM"); 4069 break; 4070 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4071 if (print) 4072 _print_next_block(par_num++, 4073 "SEARCHER"); 4074 break; 4075 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4076 if (print) 4077 _print_next_block(par_num++, "TCM"); 4078 break; 4079 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4080 if (print) 4081 _print_next_block(par_num++, "TSEMI"); 4082 break; 4083 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4084 if (print) 4085 _print_next_block(par_num++, "XPB"); 4086 break; 4087 } 4088 4089 /* Clear the bit */ 4090 sig &= ~cur_bit; 4091 } 4092 } 4093 4094 return par_num; 4095 } 4096 4097 static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, 4098 bool *global, bool print) 4099 { 4100 int i = 0; 4101 u32 cur_bit = 0; 4102 for (i = 0; sig; i++) { 4103 cur_bit = ((u32)0x1 << i); 4104 if (sig & cur_bit) { 4105 switch (cur_bit) { 4106 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4107 if (print) 4108 _print_next_block(par_num++, "PBF"); 4109 break; 4110 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4111 if (print) 4112 _print_next_block(par_num++, "QM"); 4113 break; 4114 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4115 if (print) 4116 _print_next_block(par_num++, "TM"); 4117 break; 4118 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4119 if (print) 4120 _print_next_block(par_num++, "XSDM"); 4121 break; 4122 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4123 if (print) 4124 _print_next_block(par_num++, "XCM"); 4125 break; 4126 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4127 if (print) 4128 _print_next_block(par_num++, "XSEMI"); 4129 break; 4130 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4131 if (print) 4132 _print_next_block(par_num++, 4133 "DOORBELLQ"); 4134 break; 4135 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4136 if (print) 4137 _print_next_block(par_num++, "NIG"); 4138 break; 4139 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4140 if (print) 4141 _print_next_block(par_num++, 4142 "VAUX PCI CORE"); 4143 *global = true; 4144 break; 4145 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4146 if (print) 4147 _print_next_block(par_num++, "DEBUG"); 4148 break; 4149 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4150 if (print) 4151 _print_next_block(par_num++, "USDM"); 4152 break; 4153 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4154 if (print) 4155 _print_next_block(par_num++, "UCM"); 4156 break; 4157 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4158 if (print) 4159 _print_next_block(par_num++, "USEMI"); 4160 break; 4161 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4162 if (print) 4163 _print_next_block(par_num++, "UPB"); 4164 break; 4165 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4166 if (print) 4167 _print_next_block(par_num++, "CSDM"); 4168 break; 4169 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4170 if (print) 4171 _print_next_block(par_num++, "CCM"); 4172 break; 4173 } 4174 4175 /* Clear the bit */ 4176 sig &= ~cur_bit; 4177 
} 4178 } 4179 4180 return par_num; 4181 } 4182 4183 static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, 4184 bool print) 4185 { 4186 int i = 0; 4187 u32 cur_bit = 0; 4188 for (i = 0; sig; i++) { 4189 cur_bit = ((u32)0x1 << i); 4190 if (sig & cur_bit) { 4191 switch (cur_bit) { 4192 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4193 if (print) 4194 _print_next_block(par_num++, "CSEMI"); 4195 break; 4196 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4197 if (print) 4198 _print_next_block(par_num++, "PXP"); 4199 break; 4200 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4201 if (print) 4202 _print_next_block(par_num++, 4203 "PXPPCICLOCKCLIENT"); 4204 break; 4205 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4206 if (print) 4207 _print_next_block(par_num++, "CFC"); 4208 break; 4209 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4210 if (print) 4211 _print_next_block(par_num++, "CDU"); 4212 break; 4213 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4214 if (print) 4215 _print_next_block(par_num++, "DMAE"); 4216 break; 4217 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4218 if (print) 4219 _print_next_block(par_num++, "IGU"); 4220 break; 4221 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4222 if (print) 4223 _print_next_block(par_num++, "MISC"); 4224 break; 4225 } 4226 4227 /* Clear the bit */ 4228 sig &= ~cur_bit; 4229 } 4230 } 4231 4232 return par_num; 4233 } 4234 4235 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4236 bool *global, bool print) 4237 { 4238 int i = 0; 4239 u32 cur_bit = 0; 4240 for (i = 0; sig; i++) { 4241 cur_bit = ((u32)0x1 << i); 4242 if (sig & cur_bit) { 4243 switch (cur_bit) { 4244 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4245 if (print) 4246 _print_next_block(par_num++, "MCP ROM"); 4247 *global = true; 4248 break; 4249 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4250 if (print) 4251 _print_next_block(par_num++, 4252 "MCP UMP RX"); 4253 *global = true; 4254 break; 4255 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4256 if (print) 4257 _print_next_block(par_num++, 4258 "MCP UMP TX"); 4259 *global = true; 4260 break; 4261 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4262 if (print) 4263 _print_next_block(par_num++, 4264 "MCP SCPAD"); 4265 *global = true; 4266 break; 4267 } 4268 4269 /* Clear the bit */ 4270 sig &= ~cur_bit; 4271 } 4272 } 4273 4274 return par_num; 4275 } 4276 4277 static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, 4278 bool print) 4279 { 4280 int i = 0; 4281 u32 cur_bit = 0; 4282 for (i = 0; sig; i++) { 4283 cur_bit = ((u32)0x1 << i); 4284 if (sig & cur_bit) { 4285 switch (cur_bit) { 4286 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4287 if (print) 4288 _print_next_block(par_num++, "PGLUE_B"); 4289 break; 4290 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4291 if (print) 4292 _print_next_block(par_num++, "ATC"); 4293 break; 4294 } 4295 4296 /* Clear the bit */ 4297 sig &= ~cur_bit; 4298 } 4299 } 4300 4301 return par_num; 4302 } 4303 4304 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4305 u32 *sig) 4306 { 4307 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4308 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4309 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4310 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4311 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4312 int par_num = 0; 4313 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4314 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4315 sig[0] & HW_PRTY_ASSERT_SET_0, 4316 sig[1] & HW_PRTY_ASSERT_SET_1, 4317 sig[2] & 
HW_PRTY_ASSERT_SET_2, 4318 sig[3] & HW_PRTY_ASSERT_SET_3, 4319 sig[4] & HW_PRTY_ASSERT_SET_4); 4320 if (print) 4321 netdev_err(bp->dev, 4322 "Parity errors detected in blocks: "); 4323 par_num = bnx2x_check_blocks_with_parity0( 4324 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4325 par_num = bnx2x_check_blocks_with_parity1( 4326 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4327 par_num = bnx2x_check_blocks_with_parity2( 4328 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4329 par_num = bnx2x_check_blocks_with_parity3( 4330 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4331 par_num = bnx2x_check_blocks_with_parity4( 4332 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4333 4334 if (print) 4335 pr_cont("\n"); 4336 4337 return true; 4338 } else 4339 return false; 4340 } 4341 4342 /** 4343 * bnx2x_chk_parity_attn - checks for parity attentions. 4344 * 4345 * @bp: driver handle 4346 * @global: true if there was a global attention 4347 * @print: show parity attention in syslog 4348 */ 4349 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 4350 { 4351 struct attn_route attn = { {0} }; 4352 int port = BP_PORT(bp); 4353 4354 attn.sig[0] = REG_RD(bp, 4355 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 4356 port*4); 4357 attn.sig[1] = REG_RD(bp, 4358 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 4359 port*4); 4360 attn.sig[2] = REG_RD(bp, 4361 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 4362 port*4); 4363 attn.sig[3] = REG_RD(bp, 4364 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4365 port*4); 4366 4367 if (!CHIP_IS_E1x(bp)) 4368 attn.sig[4] = REG_RD(bp, 4369 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 4370 port*4); 4371 4372 return bnx2x_parity_attn(bp, global, print, attn.sig); 4373 } 4374 4375 4376 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4377 { 4378 u32 val; 4379 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4380 4381 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 4382 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 4383 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 4384 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 4385 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 4386 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 4387 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 4388 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 4389 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 4390 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 4391 if (val & 4392 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 4393 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 4394 if (val & 4395 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 4396 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 4397 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 4398 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 4399 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 4400 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 4401 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 4402 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 4403 } 4404 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 4405 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 4406 BNX2X_ERR("ATC hw attention 0x%x\n", val); 4407 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 4408 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 4409 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 4410 
BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 4411 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 4412 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 4413 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 4414 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 4415 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 4416 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 4417 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 4418 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 4419 } 4420 4421 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4422 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 4423 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 4424 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4425 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4426 } 4427 4428 } 4429 4430 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4431 { 4432 struct attn_route attn, *group_mask; 4433 int port = BP_PORT(bp); 4434 int index; 4435 u32 reg_addr; 4436 u32 val; 4437 u32 aeu_mask; 4438 bool global = false; 4439 4440 /* need to take HW lock because MCP or other port might also 4441 try to handle this event */ 4442 bnx2x_acquire_alr(bp); 4443 4444 if (bnx2x_chk_parity_attn(bp, &global, true)) { 4445 #ifndef BNX2X_STOP_ON_ERROR 4446 bp->recovery_state = BNX2X_RECOVERY_INIT; 4447 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4448 /* Disable HW interrupts */ 4449 bnx2x_int_disable(bp); 4450 /* In case of parity errors don't handle attentions so that 4451 * other function would "see" parity errors. 4452 */ 4453 #else 4454 bnx2x_panic(); 4455 #endif 4456 bnx2x_release_alr(bp); 4457 return; 4458 } 4459 4460 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 4461 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 4462 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 4463 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 4464 if (!CHIP_IS_E1x(bp)) 4465 attn.sig[4] = 4466 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 4467 else 4468 attn.sig[4] = 0; 4469 4470 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 4471 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 4472 4473 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4474 if (deasserted & (1 << index)) { 4475 group_mask = &bp->attn_group[index]; 4476 4477 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 4478 index, 4479 group_mask->sig[0], group_mask->sig[1], 4480 group_mask->sig[2], group_mask->sig[3], 4481 group_mask->sig[4]); 4482 4483 bnx2x_attn_int_deasserted4(bp, 4484 attn.sig[4] & group_mask->sig[4]); 4485 bnx2x_attn_int_deasserted3(bp, 4486 attn.sig[3] & group_mask->sig[3]); 4487 bnx2x_attn_int_deasserted1(bp, 4488 attn.sig[1] & group_mask->sig[1]); 4489 bnx2x_attn_int_deasserted2(bp, 4490 attn.sig[2] & group_mask->sig[2]); 4491 bnx2x_attn_int_deasserted0(bp, 4492 attn.sig[0] & group_mask->sig[0]); 4493 } 4494 } 4495 4496 bnx2x_release_alr(bp); 4497 4498 if (bp->common.int_block == INT_BLOCK_HC) 4499 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4500 COMMAND_REG_ATTN_BITS_CLR); 4501 else 4502 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 4503 4504 val = ~deasserted; 4505 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 4506 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4507 REG_WR(bp, reg_addr, val); 4508 4509 if (~bp->attn_state & deasserted) 4510 BNX2X_ERR("IGU ERROR\n"); 4511 4512 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4513 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4514 4515 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4516 aeu_mask = REG_RD(bp, reg_addr); 4517 4518 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 4519 aeu_mask, deasserted); 4520 aeu_mask |= (deasserted & 0x3ff); 4521 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4522 4523 REG_WR(bp, reg_addr, aeu_mask); 4524 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4525 4526 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4527 bp->attn_state &= ~deasserted; 4528 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4529 } 4530 4531 static void bnx2x_attn_int(struct bnx2x *bp) 4532 { 4533 /* read local copy of bits */ 4534 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 4535 attn_bits); 4536 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 4537 attn_bits_ack); 4538 u32 attn_state = bp->attn_state; 4539 4540 /* look for changed bits */ 4541 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 4542 u32 deasserted = ~attn_bits & attn_ack & attn_state; 4543 4544 DP(NETIF_MSG_HW, 4545 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 4546 attn_bits, attn_ack, asserted, deasserted); 4547 4548 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 4549 BNX2X_ERR("BAD attention state\n"); 4550 4551 /* handle bits that were raised */ 4552 if (asserted) 4553 bnx2x_attn_int_asserted(bp, asserted); 4554 4555 if (deasserted) 4556 bnx2x_attn_int_deasserted(bp, deasserted); 4557 } 4558 4559 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 4560 u16 index, u8 op, u8 update) 4561 { 4562 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 4563 4564 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 4565 igu_addr); 4566 } 4567 4568 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4569 { 4570 /* No memory barriers */ 4571 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4572 mmiowb(); /* keep prod updates ordered */ 4573 } 4574 4575 #ifdef BCM_CNIC 4576 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4577 union event_ring_elem *elem) 4578 { 4579 u8 err = elem->message.error; 4580 4581 if (!bp->cnic_eth_dev.starting_cid || 4582 (cid < bp->cnic_eth_dev.starting_cid && 4583 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 4584 return 1; 4585 4586 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 4587 4588 if (unlikely(err)) { 4589 4590 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 4591 cid); 4592 bnx2x_panic_dump(bp); 4593 } 4594 bnx2x_cnic_cfc_comp(bp, cid, err); 4595 return 0; 4596 } 4597 #endif 4598 4599 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4600 { 4601 struct bnx2x_mcast_ramrod_params rparam; 4602 int rc; 4603 4604 memset(&rparam, 0, sizeof(rparam)); 4605 4606 rparam.mcast_obj = &bp->mcast_obj; 4607 4608 netif_addr_lock_bh(bp->dev); 4609 4610 /* Clear pending state for the last command */ 4611 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 4612 4613 /* If there are pending mcast commands - send them */ 4614 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 4615 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 4616 if (rc < 0) 4617 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 4618 rc); 4619 } 4620 4621 netif_addr_unlock_bh(bp->dev); 4622 } 4623 4624 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 4625 union event_ring_elem *elem) 4626 { 4627 unsigned long ramrod_flags = 0; 4628 int rc = 0; 4629 u32 cid = 
elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 4630 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 4631 4632 /* Always push next commands out, don't wait here */ 4633 __set_bit(RAMROD_CONT, &ramrod_flags); 4634 4635 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 4636 case BNX2X_FILTER_MAC_PENDING: 4637 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4638 #ifdef BCM_CNIC 4639 if (cid == BNX2X_ISCSI_ETH_CID(bp)) 4640 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4641 else 4642 #endif 4643 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 4644 4645 break; 4646 case BNX2X_FILTER_MCAST_PENDING: 4647 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 4648 /* This is only relevant for 57710 where multicast MACs are 4649 * configured as unicast MACs using the same ramrod. 4650 */ 4651 bnx2x_handle_mcast_eqe(bp); 4652 return; 4653 default: 4654 BNX2X_ERR("Unsupported classification command: %d\n", 4655 elem->message.data.eth_event.echo); 4656 return; 4657 } 4658 4659 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 4660 4661 if (rc < 0) 4662 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 4663 else if (rc > 0) 4664 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 4665 4666 } 4667 4668 #ifdef BCM_CNIC 4669 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4670 #endif 4671 4672 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4673 { 4674 netif_addr_lock_bh(bp->dev); 4675 4676 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 4677 4678 /* Send rx_mode command again if was requested */ 4679 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4680 bnx2x_set_storm_rx_mode(bp); 4681 #ifdef BCM_CNIC 4682 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4683 &bp->sp_state)) 4684 bnx2x_set_iscsi_eth_rx_mode(bp, true); 4685 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 4686 &bp->sp_state)) 4687 bnx2x_set_iscsi_eth_rx_mode(bp, false); 4688 #endif 4689 4690 netif_addr_unlock_bh(bp->dev); 4691 } 4692 4693 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 4694 union event_ring_elem *elem) 4695 { 4696 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 4697 DP(BNX2X_MSG_SP, 4698 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 4699 elem->message.data.vif_list_event.func_bit_map); 4700 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 4701 elem->message.data.vif_list_event.func_bit_map); 4702 } else if (elem->message.data.vif_list_event.echo == 4703 VIF_LIST_RULE_SET) { 4704 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 4705 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 4706 } 4707 } 4708 4709 /* called with rtnl_lock */ 4710 static void bnx2x_after_function_update(struct bnx2x *bp) 4711 { 4712 int q, rc; 4713 struct bnx2x_fastpath *fp; 4714 struct bnx2x_queue_state_params queue_params = {NULL}; 4715 struct bnx2x_queue_update_params *q_update_params = 4716 &queue_params.params.update; 4717 4718 /* Send Q update command with afex vlan removal values for all Qs */ 4719 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 4720 4721 /* set silent vlan removal values according to vlan mode */ 4722 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 4723 &q_update_params->update_flags); 4724 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 4725 &q_update_params->update_flags); 4726 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 4727 4728 /* in access mode mark mask and value are 0 to strip all vlans */ 4729 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) 
{
4730 q_update_params->silent_removal_value = 0;
4731 q_update_params->silent_removal_mask = 0;
4732 } else {
4733 q_update_params->silent_removal_value =
4734 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4735 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4736 }
4737
4738 for_each_eth_queue(bp, q) {
4739 /* Set the appropriate Queue object */
4740 fp = &bp->fp[q];
4741 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4742
4743 /* send the ramrod */
4744 rc = bnx2x_queue_state_change(bp, &queue_params);
4745 if (rc < 0)
4746 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4747 q);
4748 }
4749
4750 #ifdef BCM_CNIC
4751 if (!NO_FCOE(bp)) {
4752 fp = &bp->fp[FCOE_IDX(bp)];
4753 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4754
4755 /* clear pending completion bit */
4756 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4757
4758 /* mark latest Q bit */
4759 smp_mb__before_clear_bit();
4760 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4761 smp_mb__after_clear_bit();
4762
4763 /* send Q update ramrod for FCoE Q */
4764 rc = bnx2x_queue_state_change(bp, &queue_params);
4765 if (rc < 0)
4766 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4767 q);
4768 } else {
4769 /* If no FCoE ring - ACK MCP now */
4770 bnx2x_link_report(bp);
4771 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4772 }
4773 #else
4774 /* If no FCoE ring - ACK MCP now */
4775 bnx2x_link_report(bp);
4776 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4777 #endif /* BCM_CNIC */
4778 }
4779
4780 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4781 struct bnx2x *bp, u32 cid)
4782 {
4783 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4784 #ifdef BCM_CNIC
4785 if (cid == BNX2X_FCOE_ETH_CID(bp))
4786 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4787 else
4788 #endif
4789 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4790 }
4791
4792 static void bnx2x_eq_int(struct bnx2x *bp)
4793 {
4794 u16 hw_cons, sw_cons, sw_prod;
4795 union event_ring_elem *elem;
4796 u32 cid;
4797 u8 opcode;
4798 int spqe_cnt = 0;
4799 struct bnx2x_queue_sp_obj *q_obj;
4800 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
4801 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
4802
4803 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
4804
4805 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
4806 * When we reach the next-page element we need to adjust so the loop
4807 * condition below will be met. The next-page element has the size of a
4808 * regular element, hence we increment by 1.
4809 */
4810 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
4811 hw_cons++;
4812
4813 /* This function may never run in parallel with itself for a
4814 * specific bp, thus there is no need for a "paired" read memory
4815 * barrier here.
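 * (bnx2x_eq_int() is invoked only from the slow-path task,
 * bnx2x_sp_task() below, so a single instance runs at a time and
 * eq_cons/eq_prod can be read and written without extra locking.)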
4816 */ 4817 sw_cons = bp->eq_cons; 4818 sw_prod = bp->eq_prod; 4819 4820 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 4821 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 4822 4823 for (; sw_cons != hw_cons; 4824 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 4825 4826 4827 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 4828 4829 cid = SW_CID(elem->message.data.cfc_del_event.cid); 4830 opcode = elem->message.opcode; 4831 4832 4833 /* handle eq element */ 4834 switch (opcode) { 4835 case EVENT_RING_OPCODE_STAT_QUERY: 4836 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 4837 "got statistics comp event %d\n", 4838 bp->stats_comp++); 4839 /* nothing to do with stats comp */ 4840 goto next_spqe; 4841 4842 case EVENT_RING_OPCODE_CFC_DEL: 4843 /* handle according to cid range */ 4844 /* 4845 * we may want to verify here that the bp state is 4846 * HALTING 4847 */ 4848 DP(BNX2X_MSG_SP, 4849 "got delete ramrod for MULTI[%d]\n", cid); 4850 #ifdef BCM_CNIC 4851 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 4852 goto next_spqe; 4853 #endif 4854 q_obj = bnx2x_cid_to_q_obj(bp, cid); 4855 4856 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 4857 break; 4858 4859 4860 4861 goto next_spqe; 4862 4863 case EVENT_RING_OPCODE_STOP_TRAFFIC: 4864 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 4865 if (f_obj->complete_cmd(bp, f_obj, 4866 BNX2X_F_CMD_TX_STOP)) 4867 break; 4868 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 4869 goto next_spqe; 4870 4871 case EVENT_RING_OPCODE_START_TRAFFIC: 4872 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 4873 if (f_obj->complete_cmd(bp, f_obj, 4874 BNX2X_F_CMD_TX_START)) 4875 break; 4876 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4877 goto next_spqe; 4878 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4879 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 4880 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 4881 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); 4882 4883 /* We will perform the Queues update from sp_rtnl task 4884 * as all Queue SP operations should run under 4885 * rtnl_lock. 4886 */ 4887 smp_mb__before_clear_bit(); 4888 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 4889 &bp->sp_rtnl_state); 4890 smp_mb__after_clear_bit(); 4891 4892 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4893 goto next_spqe; 4894 4895 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 4896 f_obj->complete_cmd(bp, f_obj, 4897 BNX2X_F_CMD_AFEX_VIFLISTS); 4898 bnx2x_after_afex_vif_lists(bp, elem); 4899 goto next_spqe; 4900 case EVENT_RING_OPCODE_FUNCTION_START: 4901 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4902 "got FUNC_START ramrod\n"); 4903 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 4904 break; 4905 4906 goto next_spqe; 4907 4908 case EVENT_RING_OPCODE_FUNCTION_STOP: 4909 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4910 "got FUNC_STOP ramrod\n"); 4911 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 4912 break; 4913 4914 goto next_spqe; 4915 } 4916 4917 switch (opcode | bp->state) { 4918 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 4919 BNX2X_STATE_OPEN): 4920 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 4921 BNX2X_STATE_OPENING_WAIT4_PORT): 4922 cid = elem->message.data.eth_event.echo & 4923 BNX2X_SWCID_MASK; 4924 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", 4925 cid); 4926 rss_raw->clear_pending(rss_raw); 4927 break; 4928 4929 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 4930 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 4931 case (EVENT_RING_OPCODE_SET_MAC | 4932 BNX2X_STATE_CLOSING_WAIT4_HALT): 4933 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4934 BNX2X_STATE_OPEN): 4935 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4936 BNX2X_STATE_DIAG): 4937 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 4938 BNX2X_STATE_CLOSING_WAIT4_HALT): 4939 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); 4940 bnx2x_handle_classification_eqe(bp, elem); 4941 break; 4942 4943 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4944 BNX2X_STATE_OPEN): 4945 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4946 BNX2X_STATE_DIAG): 4947 case (EVENT_RING_OPCODE_MULTICAST_RULES | 4948 BNX2X_STATE_CLOSING_WAIT4_HALT): 4949 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 4950 bnx2x_handle_mcast_eqe(bp); 4951 break; 4952 4953 case (EVENT_RING_OPCODE_FILTERS_RULES | 4954 BNX2X_STATE_OPEN): 4955 case (EVENT_RING_OPCODE_FILTERS_RULES | 4956 BNX2X_STATE_DIAG): 4957 case (EVENT_RING_OPCODE_FILTERS_RULES | 4958 BNX2X_STATE_CLOSING_WAIT4_HALT): 4959 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 4960 bnx2x_handle_rx_mode_eqe(bp); 4961 break; 4962 default: 4963 /* unknown event log error and continue */ 4964 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 4965 elem->message.opcode, bp->state); 4966 } 4967 next_spqe: 4968 spqe_cnt++; 4969 } /* for */ 4970 4971 smp_mb__before_atomic_inc(); 4972 atomic_add(spqe_cnt, &bp->eq_spq_left); 4973 4974 bp->eq_cons = sw_cons; 4975 bp->eq_prod = sw_prod; 4976 /* Make sure that above mem writes were issued towards the memory */ 4977 smp_wmb(); 4978 4979 /* update producer */ 4980 bnx2x_update_eq_prod(bp, bp->eq_prod); 4981 } 4982 4983 static void bnx2x_sp_task(struct work_struct *work) 4984 { 4985 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 4986 u16 status; 4987 4988 status = bnx2x_update_dsb_idx(bp); 4989 /* if (status == 0) */ 4990 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ 4991 4992 DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status); 4993 4994 /* HW attentions */ 4995 if (status & BNX2X_DEF_SB_ATT_IDX) { 4996 bnx2x_attn_int(bp); 4997 status &= ~BNX2X_DEF_SB_ATT_IDX; 4998 } 4999 5000 /* SP events: STAT_QUERY and others */ 5001 if (status & BNX2X_DEF_SB_IDX) { 5002 #ifdef BCM_CNIC 5003 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5004 5005 if ((!NO_FCOE(bp)) && 5006 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5007 /* 5008 * Prevent local bottom-halves from running as 5009 * we are going to change the local NAPI list. 5010 */ 5011 local_bh_disable(); 5012 napi_schedule(&bnx2x_fcoe(bp, napi)); 5013 local_bh_enable(); 5014 } 5015 #endif 5016 /* Handle EQ completions */ 5017 bnx2x_eq_int(bp); 5018 5019 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 5020 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5021 5022 status &= ~BNX2X_DEF_SB_IDX; 5023 } 5024 5025 if (unlikely(status)) 5026 DP(BNX2X_MSG_SP, "got an unknown interrupt! 
(status 0x%x)\n", 5027 status); 5028 5029 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5030 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5031 5032 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5033 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5034 &bp->sp_state)) { 5035 bnx2x_link_report(bp); 5036 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5037 } 5038 } 5039 5040 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5041 { 5042 struct net_device *dev = dev_instance; 5043 struct bnx2x *bp = netdev_priv(dev); 5044 5045 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5046 IGU_INT_DISABLE, 0); 5047 5048 #ifdef BNX2X_STOP_ON_ERROR 5049 if (unlikely(bp->panic)) 5050 return IRQ_HANDLED; 5051 #endif 5052 5053 #ifdef BCM_CNIC 5054 { 5055 struct cnic_ops *c_ops; 5056 5057 rcu_read_lock(); 5058 c_ops = rcu_dereference(bp->cnic_ops); 5059 if (c_ops) 5060 c_ops->cnic_handler(bp->cnic_data, NULL); 5061 rcu_read_unlock(); 5062 } 5063 #endif 5064 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 5065 5066 return IRQ_HANDLED; 5067 } 5068 5069 /* end of slow path */ 5070 5071 5072 void bnx2x_drv_pulse(struct bnx2x *bp) 5073 { 5074 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5075 bp->fw_drv_pulse_wr_seq); 5076 } 5077 5078 5079 static void bnx2x_timer(unsigned long data) 5080 { 5081 struct bnx2x *bp = (struct bnx2x *) data; 5082 5083 if (!netif_running(bp->dev)) 5084 return; 5085 5086 if (!BP_NOMCP(bp)) { 5087 int mb_idx = BP_FW_MB_IDX(bp); 5088 u32 drv_pulse; 5089 u32 mcp_pulse; 5090 5091 ++bp->fw_drv_pulse_wr_seq; 5092 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5093 /* TBD - add SYSTEM_TIME */ 5094 drv_pulse = bp->fw_drv_pulse_wr_seq; 5095 bnx2x_drv_pulse(bp); 5096 5097 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5098 MCP_PULSE_SEQ_MASK); 5099 /* The delta between driver pulse and mcp response 5100 * should be 1 (before mcp response) or 0 (after mcp response) 5101 */ 5102 if ((drv_pulse != mcp_pulse) && 5103 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5104 /* someone lost a heartbeat... 
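 * e.g. if the driver just wrote drv_pulse 0x10, a healthy MCP
 * reports mcp_pulse 0x10 (already acked) or 0x0f (ack still
 * pending); any other value means the heartbeat was lost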
*/ 5105 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 5106 drv_pulse, mcp_pulse); 5107 } 5108 } 5109 5110 if (bp->state == BNX2X_STATE_OPEN) 5111 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 5112 5113 mod_timer(&bp->timer, jiffies + bp->current_interval); 5114 } 5115 5116 /* end of Statistics */ 5117 5118 /* nic init */ 5119 5120 /* 5121 * nic init service functions 5122 */ 5123 5124 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5125 { 5126 u32 i; 5127 if (!(len%4) && !(addr%4)) 5128 for (i = 0; i < len; i += 4) 5129 REG_WR(bp, addr + i, fill); 5130 else 5131 for (i = 0; i < len; i++) 5132 REG_WR8(bp, addr + i, fill); 5133 5134 } 5135 5136 /* helper: writes FP SP data to FW - data_size in dwords */ 5137 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5138 int fw_sb_id, 5139 u32 *sb_data_p, 5140 u32 data_size) 5141 { 5142 int index; 5143 for (index = 0; index < data_size; index++) 5144 REG_WR(bp, BAR_CSTRORM_INTMEM + 5145 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 5146 sizeof(u32)*index, 5147 *(sb_data_p + index)); 5148 } 5149 5150 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5151 { 5152 u32 *sb_data_p; 5153 u32 data_size = 0; 5154 struct hc_status_block_data_e2 sb_data_e2; 5155 struct hc_status_block_data_e1x sb_data_e1x; 5156 5157 /* disable the function first */ 5158 if (!CHIP_IS_E1x(bp)) { 5159 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5160 sb_data_e2.common.state = SB_DISABLED; 5161 sb_data_e2.common.p_func.vf_valid = false; 5162 sb_data_p = (u32 *)&sb_data_e2; 5163 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5164 } else { 5165 memset(&sb_data_e1x, 0, 5166 sizeof(struct hc_status_block_data_e1x)); 5167 sb_data_e1x.common.state = SB_DISABLED; 5168 sb_data_e1x.common.p_func.vf_valid = false; 5169 sb_data_p = (u32 *)&sb_data_e1x; 5170 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5171 } 5172 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5173 5174 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5175 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, 5176 CSTORM_STATUS_BLOCK_SIZE); 5177 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5178 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, 5179 CSTORM_SYNC_BLOCK_SIZE); 5180 } 5181 5182 /* helper: writes SP SB data to FW */ 5183 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5184 struct hc_sp_status_block_data *sp_sb_data) 5185 { 5186 int func = BP_FUNC(bp); 5187 int i; 5188 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 5189 REG_WR(bp, BAR_CSTRORM_INTMEM + 5190 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 5191 i*sizeof(u32), 5192 *((u32 *)sp_sb_data + i)); 5193 } 5194 5195 static void bnx2x_zero_sp_sb(struct bnx2x *bp) 5196 { 5197 int func = BP_FUNC(bp); 5198 struct hc_sp_status_block_data sp_sb_data; 5199 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5200 5201 sp_sb_data.state = SB_DISABLED; 5202 sp_sb_data.p_func.vf_valid = false; 5203 5204 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5205 5206 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5207 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, 5208 CSTORM_SP_STATUS_BLOCK_SIZE); 5209 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5210 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5211 CSTORM_SP_SYNC_BLOCK_SIZE); 5212 5213 } 5214 5215 5216 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5217 int igu_sb_id, int igu_seg_id) 5218 { 5219 hc_sm->igu_sb_id = igu_sb_id; 5220 hc_sm->igu_seg_id = igu_seg_id; 5221 hc_sm->timer_value = 0xFF; 5222 hc_sm->time_to_expire = 0xFFFFFFFF; 5223 } 5224 5225 
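/*
 * Each status block index is bound to one of the two host-coalescing
 * state machines (SM_RX_ID or SM_TX_ID) through the SM_ID field of its
 * flags word. As an illustrative sketch (not extra driver code), the
 * per-index update done by the helper below amounts to:
 *
 *	index_data[i].flags = (index_data[i].flags & ~HC_INDEX_DATA_SM_ID) |
 *			      (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
 */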
5226 /* maps the status block indices to state machine ids */
5227 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5228 {
5229 /* zero out state machine indices */
5230 /* rx indices */
5231 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5232
5233 /* tx indices */
5234 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5235 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5236 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5237 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5238
5239 /* map indices */
5240 /* rx indices */
5241 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5242 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5243
5244 /* tx indices */
5245 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5246 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5247 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5248 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5249 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5250 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5251 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5252 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5253 }
5254
5255 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5256 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5257 {
5258 int igu_seg_id;
5259
5260 struct hc_status_block_data_e2 sb_data_e2;
5261 struct hc_status_block_data_e1x sb_data_e1x;
5262 struct hc_status_block_sm *hc_sm_p;
5263 int data_size;
5264 u32 *sb_data_p;
5265
5266 if (CHIP_INT_MODE_IS_BC(bp))
5267 igu_seg_id = HC_SEG_ACCESS_NORM;
5268 else
5269 igu_seg_id = IGU_SEG_ACCESS_NORM;
5270
5271 bnx2x_zero_fp_sb(bp, fw_sb_id);
5272
5273 if (!CHIP_IS_E1x(bp)) {
5274 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5275 sb_data_e2.common.state = SB_ENABLED;
5276 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5277 sb_data_e2.common.p_func.vf_id = vfid;
5278 sb_data_e2.common.p_func.vf_valid = vf_valid;
5279 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5280 sb_data_e2.common.same_igu_sb_1b = true;
5281 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5282 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5283 hc_sm_p = sb_data_e2.common.state_machine;
5284 sb_data_p = (u32 *)&sb_data_e2;
5285 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5286 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5287 } else {
5288 memset(&sb_data_e1x, 0,
5289 sizeof(struct hc_status_block_data_e1x));
5290 sb_data_e1x.common.state = SB_ENABLED;
5291 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5292 sb_data_e1x.common.p_func.vf_id = 0xff;
5293 sb_data_e1x.common.p_func.vf_valid = false;
5294 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5295 sb_data_e1x.common.same_igu_sb_1b = true;
5296 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5297 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5298 hc_sm_p = sb_data_e1x.common.state_machine;
5299 sb_data_p = (u32 *)&sb_data_e1x;
5300 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5301 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5302 }
5303
5304 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5305 igu_sb_id, igu_seg_id);
5306 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5307 igu_sb_id, igu_seg_id);
5308
5309 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5310
5311 /* write indices to HW */
5312 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5313 }
5314
5315 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5316 u16 tx_usec, u16
rx_usec) 5317 { 5318 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, 5319 false, rx_usec); 5320 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5321 HC_INDEX_ETH_TX_CQ_CONS_COS0, false, 5322 tx_usec); 5323 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5324 HC_INDEX_ETH_TX_CQ_CONS_COS1, false, 5325 tx_usec); 5326 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 5327 HC_INDEX_ETH_TX_CQ_CONS_COS2, false, 5328 tx_usec); 5329 } 5330 5331 static void bnx2x_init_def_sb(struct bnx2x *bp) 5332 { 5333 struct host_sp_status_block *def_sb = bp->def_status_blk; 5334 dma_addr_t mapping = bp->def_status_blk_mapping; 5335 int igu_sp_sb_index; 5336 int igu_seg_id; 5337 int port = BP_PORT(bp); 5338 int func = BP_FUNC(bp); 5339 int reg_offset, reg_offset_en5; 5340 u64 section; 5341 int index; 5342 struct hc_sp_status_block_data sp_sb_data; 5343 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5344 5345 if (CHIP_INT_MODE_IS_BC(bp)) { 5346 igu_sp_sb_index = DEF_SB_IGU_ID; 5347 igu_seg_id = HC_SEG_ACCESS_DEF; 5348 } else { 5349 igu_sp_sb_index = bp->igu_dsb_id; 5350 igu_seg_id = IGU_SEG_ACCESS_DEF; 5351 } 5352 5353 /* ATTN */ 5354 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 5355 atten_status_block); 5356 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 5357 5358 bp->attn_state = 0; 5359 5360 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5361 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5362 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 5363 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); 5364 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5365 int sindex; 5366 /* take care of sig[0]..sig[4] */ 5367 for (sindex = 0; sindex < 4; sindex++) 5368 bp->attn_group[index].sig[sindex] = 5369 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 5370 5371 if (!CHIP_IS_E1x(bp)) 5372 /* 5373 * enable5 is separate from the rest of the registers, 5374 * and therefore the address skip is 4 5375 * and not 16 between the different groups 5376 */ 5377 bp->attn_group[index].sig[4] = REG_RD(bp, 5378 reg_offset_en5 + 0x4*index); 5379 else 5380 bp->attn_group[index].sig[4] = 0; 5381 } 5382 5383 if (bp->common.int_block == INT_BLOCK_HC) { 5384 reg_offset = (port ? 
HC_REG_ATTN_MSG1_ADDR_L :
5385 HC_REG_ATTN_MSG0_ADDR_L);
5386
5387 REG_WR(bp, reg_offset, U64_LO(section));
5388 REG_WR(bp, reg_offset + 4, U64_HI(section));
5389 } else if (!CHIP_IS_E1x(bp)) {
5390 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5391 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5392 }
5393
5394 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5395 sp_sb);
5396
5397 bnx2x_zero_sp_sb(bp);
5398
5399 sp_sb_data.state = SB_ENABLED;
5400 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5401 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5402 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5403 sp_sb_data.igu_seg_id = igu_seg_id;
5404 sp_sb_data.p_func.pf_id = func;
5405 sp_sb_data.p_func.vnic_id = BP_VN(bp);
5406 sp_sb_data.p_func.vf_id = 0xff;
5407
5408 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5409
5410 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5411 }
5412
5413 void bnx2x_update_coalesce(struct bnx2x *bp)
5414 {
5415 int i;
5416
5417 for_each_eth_queue(bp, i)
5418 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5419 bp->tx_ticks, bp->rx_ticks);
5420 }
5421
5422 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5423 {
5424 spin_lock_init(&bp->spq_lock);
5425 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5426
5427 bp->spq_prod_idx = 0;
5428 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5429 bp->spq_prod_bd = bp->spq;
5430 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5431 }
5432
5433 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5434 {
5435 int i;
5436 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5437 union event_ring_elem *elem =
5438 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5439
5440 elem->next_page.addr.hi =
5441 cpu_to_le32(U64_HI(bp->eq_mapping +
5442 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5443 elem->next_page.addr.lo =
5444 cpu_to_le32(U64_LO(bp->eq_mapping +
5445 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5446 }
5447 bp->eq_cons = 0;
5448 bp->eq_prod = NUM_EQ_DESC;
5449 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5450 /* we want a warning message before it gets rough...
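 * the EQ shares the slow-path queue budget, so the credit set below
 * is capped by both MAX_SP_DESC_CNT - MAX_SPQ_PENDING and
 * NUM_EQ_DESC, less one element held back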
*/
5451 atomic_set(&bp->eq_spq_left,
5452 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5453 }
5454
5455
5456 /* called with netif_addr_lock_bh() */
5457 void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5458 unsigned long rx_mode_flags,
5459 unsigned long rx_accept_flags,
5460 unsigned long tx_accept_flags,
5461 unsigned long ramrod_flags)
5462 {
5463 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5464 int rc;
5465
5466 memset(&ramrod_param, 0, sizeof(ramrod_param));
5467
5468 /* Prepare ramrod parameters */
5469 ramrod_param.cid = 0;
5470 ramrod_param.cl_id = cl_id;
5471 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5472 ramrod_param.func_id = BP_FUNC(bp);
5473
5474 ramrod_param.pstate = &bp->sp_state;
5475 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5476
5477 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5478 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5479
5480 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5481
5482 ramrod_param.ramrod_flags = ramrod_flags;
5483 ramrod_param.rx_mode_flags = rx_mode_flags;
5484
5485 ramrod_param.rx_accept_flags = rx_accept_flags;
5486 ramrod_param.tx_accept_flags = tx_accept_flags;
5487
5488 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5489 if (rc < 0) {
5490 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5491 return;
5492 }
5493 }
5494
5495 /* called with netif_addr_lock_bh() */
5496 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5497 {
5498 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5499 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5500
5501 #ifdef BCM_CNIC
5502 if (!NO_FCOE(bp))
5503
5504 /* Configure rx_mode of FCoE Queue */
5505 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5506 #endif
5507
5508 switch (bp->rx_mode) {
5509 case BNX2X_RX_MODE_NONE:
5510 /*
5511 * 'drop all' supersedes any accept flags that may have been
5512 * passed to the function.
5513 */
5514 break;
5515 case BNX2X_RX_MODE_NORMAL:
5516 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5517 __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
5518 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5519
5520 /* internal switching mode */
5521 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5522 __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
5523 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5524
5525 break;
5526 case BNX2X_RX_MODE_ALLMULTI:
5527 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5528 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5529 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5530
5531 /* internal switching mode */
5532 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5533 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5534 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5535
5536 break;
5537 case BNX2X_RX_MODE_PROMISC:
5538 /* According to the definition of SI mode, an interface in
5539 * promisc mode should receive matched and unmatched (in
5540 * resolution of port) unicast packets.
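 * hence BNX2X_ACCEPT_UNMATCHED is added below on the Rx side on top
 * of the regular accept flags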
5541 */ 5542 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); 5543 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); 5544 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); 5545 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); 5546 5547 /* internal switching mode */ 5548 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); 5549 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); 5550 5551 if (IS_MF_SI(bp)) 5552 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); 5553 else 5554 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); 5555 5556 break; 5557 default: 5558 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); 5559 return; 5560 } 5561 5562 if (bp->rx_mode != BNX2X_RX_MODE_NONE) { 5563 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); 5564 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); 5565 } 5566 5567 __set_bit(RAMROD_RX, &ramrod_flags); 5568 __set_bit(RAMROD_TX, &ramrod_flags); 5569 5570 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, 5571 tx_accept_flags, ramrod_flags); 5572 } 5573 5574 static void bnx2x_init_internal_common(struct bnx2x *bp) 5575 { 5576 int i; 5577 5578 if (IS_MF_SI(bp)) 5579 /* 5580 * In switch independent mode, the TSTORM needs to accept 5581 * packets that failed classification, since approximate match 5582 * mac addresses aren't written to NIG LLH 5583 */ 5584 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5585 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); 5586 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ 5587 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5588 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); 5589 5590 /* Zero this manually as its initialization is 5591 currently missing in the initTool */ 5592 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 5593 REG_WR(bp, BAR_USTRORM_INTMEM + 5594 USTORM_AGG_DATA_OFFSET + i * 4, 0); 5595 if (!CHIP_IS_E1x(bp)) { 5596 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, 5597 CHIP_INT_MODE_IS_BC(bp) ? 
5598 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5599 }
5600 }
5601
5602 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5603 {
5604 switch (load_code) {
5605 case FW_MSG_CODE_DRV_LOAD_COMMON:
5606 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5607 bnx2x_init_internal_common(bp);
5608 /* no break */
5609
5610 case FW_MSG_CODE_DRV_LOAD_PORT:
5611 /* nothing to do */
5612 /* no break */
5613
5614 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5615 /* internal memory per function is
5616 initialized inside bnx2x_pf_init */
5617 break;
5618
5619 default:
5620 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5621 break;
5622 }
5623 }
5624
5625 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5626 {
5627 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
5628 }
5629
5630 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5631 {
5632 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
5633 }
5634
5635 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5636 {
5637 if (CHIP_IS_E1x(fp->bp))
5638 return BP_L_ID(fp->bp) + fp->index;
5639 else /* We want Client ID to be the same as IGU SB ID for 57712 */
5640 return bnx2x_fp_igu_sb_id(fp);
5641 }
5642
5643 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5644 {
5645 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5646 u8 cos;
5647 unsigned long q_type = 0;
5648 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5649 fp->rx_queue = fp_idx;
5650 fp->cid = fp_idx;
5651 fp->cl_id = bnx2x_fp_cl_id(fp);
5652 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5653 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
5654 /* qZone id equals the FW (per path) client id */
5655 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
5656
5657 /* init shortcut */
5658 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5659
5660 /* Setup SB indices */
5661 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5662
5663 /* Configure Queue State object */
5664 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5665 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5666
5667 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5668
5669 /* init tx data */
5670 for_each_cos_in_tx_queue(fp, cos) {
5671 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5672 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5673 FP_COS_TO_TXQ(fp, cos, bp),
5674 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5675 cids[cos] = fp->txdata_ptr[cos]->cid;
5676 }
5677
5678 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5679 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5680 bnx2x_sp_mapping(bp, q_rdata), q_type);
5681
5682 /*
5683 * Configure classification DBs: Always enable Tx switching
5684 */
5685 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5686
5687 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5688 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5689 fp->igu_sb_id);
5690 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5691 fp->fw_sb_id, fp->igu_sb_id);
5692
5693 bnx2x_update_fpsb_idx(fp);
5694 }
5695
5696 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5697 {
5698 int i;
5699
5700 for (i = 1; i <= NUM_TX_RINGS; i++) {
5701 struct eth_tx_next_bd *tx_next_bd =
5702 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5703
5704 tx_next_bd->addr_hi =
5705 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5706 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5707 tx_next_bd->addr_lo =
5708 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5709 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5710 }
5711
5712 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE,
1); 5713 txdata->tx_db.data.zero_fill1 = 0; 5714 txdata->tx_db.data.prod = 0; 5715 5716 txdata->tx_pkt_prod = 0; 5717 txdata->tx_pkt_cons = 0; 5718 txdata->tx_bd_prod = 0; 5719 txdata->tx_bd_cons = 0; 5720 txdata->tx_pkt = 0; 5721 } 5722 5723 static void bnx2x_init_tx_rings(struct bnx2x *bp) 5724 { 5725 int i; 5726 u8 cos; 5727 5728 for_each_tx_queue(bp, i) 5729 for_each_cos_in_tx_queue(&bp->fp[i], cos) 5730 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 5731 } 5732 5733 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5734 { 5735 int i; 5736 5737 for_each_eth_queue(bp, i) 5738 bnx2x_init_eth_fp(bp, i); 5739 #ifdef BCM_CNIC 5740 if (!NO_FCOE(bp)) 5741 bnx2x_init_fcoe_fp(bp); 5742 5743 bnx2x_init_sb(bp, bp->cnic_sb_mapping, 5744 BNX2X_VF_ID_INVALID, false, 5745 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 5746 5747 #endif 5748 5749 /* Initialize MOD_ABS interrupts */ 5750 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 5751 bp->common.shmem_base, bp->common.shmem2_base, 5752 BP_PORT(bp)); 5753 /* ensure status block indices were read */ 5754 rmb(); 5755 5756 bnx2x_init_def_sb(bp); 5757 bnx2x_update_dsb_idx(bp); 5758 bnx2x_init_rx_rings(bp); 5759 bnx2x_init_tx_rings(bp); 5760 bnx2x_init_sp_ring(bp); 5761 bnx2x_init_eq_ring(bp); 5762 bnx2x_init_internal(bp, load_code); 5763 bnx2x_pf_init(bp); 5764 bnx2x_stats_init(bp); 5765 5766 /* flush all before enabling interrupts */ 5767 mb(); 5768 mmiowb(); 5769 5770 bnx2x_int_enable(bp); 5771 5772 /* Check for SPIO5 */ 5773 bnx2x_attn_int_deasserted0(bp, 5774 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & 5775 AEU_INPUTS_ATTN_BITS_SPIO5); 5776 } 5777 5778 /* end of nic init */ 5779 5780 /* 5781 * gzip service functions 5782 */ 5783 5784 static int bnx2x_gunzip_init(struct bnx2x *bp) 5785 { 5786 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, 5787 &bp->gunzip_mapping, GFP_KERNEL); 5788 if (bp->gunzip_buf == NULL) 5789 goto gunzip_nomem1; 5790 5791 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); 5792 if (bp->strm == NULL) 5793 goto gunzip_nomem2; 5794 5795 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); 5796 if (bp->strm->workspace == NULL) 5797 goto gunzip_nomem3; 5798 5799 return 0; 5800 5801 gunzip_nomem3: 5802 kfree(bp->strm); 5803 bp->strm = NULL; 5804 5805 gunzip_nomem2: 5806 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 5807 bp->gunzip_mapping); 5808 bp->gunzip_buf = NULL; 5809 5810 gunzip_nomem1: 5811 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); 5812 return -ENOMEM; 5813 } 5814 5815 static void bnx2x_gunzip_end(struct bnx2x *bp) 5816 { 5817 if (bp->strm) { 5818 vfree(bp->strm->workspace); 5819 kfree(bp->strm); 5820 bp->strm = NULL; 5821 } 5822 5823 if (bp->gunzip_buf) { 5824 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 5825 bp->gunzip_mapping); 5826 bp->gunzip_buf = NULL; 5827 } 5828 } 5829 5830 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) 5831 { 5832 int n, rc; 5833 5834 /* check gzip header */ 5835 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { 5836 BNX2X_ERR("Bad gzip header\n"); 5837 return -EINVAL; 5838 } 5839 5840 n = 10; 5841 5842 #define FNAME 0x8 5843 5844 if (zbuf[3] & FNAME) 5845 while ((zbuf[n++] != 0) && (n < len)); 5846 5847 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; 5848 bp->strm->avail_in = len - n; 5849 bp->strm->next_out = bp->gunzip_buf; 5850 bp->strm->avail_out = FW_BUF_SIZE; 5851 5852 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); 
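	/* Note: the negative windowBits value above requests a raw deflate
	 * stream - the gzip member header was already skipped by hand, so
	 * zlib must not expect a zlib/gzip wrapper here. Illustrative
	 * contrast (a sketch, not driver code):
	 *
	 *	zlib_inflateInit2(strm, -MAX_WBITS);	raw deflate
	 *	zlib_inflateInit2(strm, MAX_WBITS);	zlib-wrapped stream
	 */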
5853 if (rc != Z_OK) 5854 return rc; 5855 5856 rc = zlib_inflate(bp->strm, Z_FINISH); 5857 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 5858 netdev_err(bp->dev, "Firmware decompression error: %s\n", 5859 bp->strm->msg); 5860 5861 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 5862 if (bp->gunzip_outlen & 0x3) 5863 netdev_err(bp->dev, 5864 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 5865 bp->gunzip_outlen); 5866 bp->gunzip_outlen >>= 2; 5867 5868 zlib_inflateEnd(bp->strm); 5869 5870 if (rc == Z_STREAM_END) 5871 return 0; 5872 5873 return rc; 5874 } 5875 5876 /* nic load/unload */ 5877 5878 /* 5879 * General service functions 5880 */ 5881 5882 /* send a NIG loopback debug packet */ 5883 static void bnx2x_lb_pckt(struct bnx2x *bp) 5884 { 5885 u32 wb_write[3]; 5886 5887 /* Ethernet source and destination addresses */ 5888 wb_write[0] = 0x55555555; 5889 wb_write[1] = 0x55555555; 5890 wb_write[2] = 0x20; /* SOP */ 5891 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 5892 5893 /* NON-IP protocol */ 5894 wb_write[0] = 0x09000000; 5895 wb_write[1] = 0x55555555; 5896 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 5897 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 5898 } 5899 5900 /* some of the internal memories 5901 * are not directly readable from the driver 5902 * to test them we send debug packets 5903 */ 5904 static int bnx2x_int_mem_test(struct bnx2x *bp) 5905 { 5906 int factor; 5907 int count, i; 5908 u32 val = 0; 5909 5910 if (CHIP_REV_IS_FPGA(bp)) 5911 factor = 120; 5912 else if (CHIP_REV_IS_EMUL(bp)) 5913 factor = 200; 5914 else 5915 factor = 1; 5916 5917 /* Disable inputs of parser neighbor blocks */ 5918 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 5919 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 5920 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 5921 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 5922 5923 /* Write 0 to parser credits for CFC search request */ 5924 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 5925 5926 /* send Ethernet packet */ 5927 bnx2x_lb_pckt(bp); 5928 5929 /* TODO do i reset NIG statistic? 
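 * (The checks below compare NIG_REG_STAT2_BRB_OCTET against absolute
 * byte counts - 0x10 for one packet, 0xb0 for eleven - so a stale,
 * non-zero counter left over from an earlier run would make them fail.)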
*/ 5930 /* Wait until NIG register shows 1 packet of size 0x10 */ 5931 count = 1000 * factor; 5932 while (count) { 5933 5934 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5935 val = *bnx2x_sp(bp, wb_data[0]); 5936 if (val == 0x10) 5937 break; 5938 5939 msleep(10); 5940 count--; 5941 } 5942 if (val != 0x10) { 5943 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 5944 return -1; 5945 } 5946 5947 /* Wait until PRS register shows 1 packet */ 5948 count = 1000 * factor; 5949 while (count) { 5950 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 5951 if (val == 1) 5952 break; 5953 5954 msleep(10); 5955 count--; 5956 } 5957 if (val != 0x1) { 5958 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 5959 return -2; 5960 } 5961 5962 /* Reset and init BRB, PRS */ 5963 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 5964 msleep(50); 5965 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 5966 msleep(50); 5967 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 5968 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 5969 5970 DP(NETIF_MSG_HW, "part2\n"); 5971 5972 /* Disable inputs of parser neighbor blocks */ 5973 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 5974 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 5975 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 5976 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 5977 5978 /* Write 0 to parser credits for CFC search request */ 5979 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 5980 5981 /* send 10 Ethernet packets */ 5982 for (i = 0; i < 10; i++) 5983 bnx2x_lb_pckt(bp); 5984 5985 /* Wait until NIG register shows 10 + 1 5986 packets of size 11*0x10 = 0xb0 */ 5987 count = 1000 * factor; 5988 while (count) { 5989 5990 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5991 val = *bnx2x_sp(bp, wb_data[0]); 5992 if (val == 0xb0) 5993 break; 5994 5995 msleep(10); 5996 count--; 5997 } 5998 if (val != 0xb0) { 5999 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6000 return -3; 6001 } 6002 6003 /* Wait until PRS register shows 2 packets */ 6004 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6005 if (val != 2) 6006 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6007 6008 /* Write 1 to parser credits for CFC search request */ 6009 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 6010 6011 /* Wait until PRS register shows 3 packets */ 6012 msleep(10 * factor); 6013 /* Wait until NIG register shows 1 packet of size 0x10 */ 6014 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6015 if (val != 3) 6016 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6017 6018 /* clear NIG EOP FIFO */ 6019 for (i = 0; i < 11; i++) 6020 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); 6021 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); 6022 if (val != 1) { 6023 BNX2X_ERR("clear of NIG failed\n"); 6024 return -4; 6025 } 6026 6027 /* Reset and init BRB, PRS, NIG */ 6028 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6029 msleep(50); 6030 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6031 msleep(50); 6032 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6033 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6034 #ifndef BCM_CNIC 6035 /* set NIC mode */ 6036 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6037 #endif 6038 6039 /* Enable inputs of parser neighbor blocks */ 6040 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6041 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 6042 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 6043 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); 6044 6045 DP(NETIF_MSG_HW, "done\n"); 6046 6047 return 0; /* OK */ 6048 } 6049 6050 static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6051 { 6052 REG_WR(bp, 
PXP_REG_PXP_INT_MASK_0, 0); 6053 if (!CHIP_IS_E1x(bp)) 6054 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6055 else 6056 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6057 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6058 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6059 /* 6060 * mask read length error interrupts in brb for parser 6061 * (parsing unit and 'checksum and crc' unit) 6062 * these errors are legal (PU reads fixed length and CAC can cause 6063 * read length error on truncated packets) 6064 */ 6065 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6066 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6067 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6068 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6069 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6070 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6071 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6072 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6073 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6074 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6075 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6076 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6077 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6078 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6079 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6080 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6081 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6082 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6083 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6084 6085 if (CHIP_REV_IS_FPGA(bp)) 6086 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 6087 else if (!CHIP_IS_E1x(bp)) 6088 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 6089 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF 6090 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT 6091 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN 6092 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED 6093 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED)); 6094 else 6095 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); 6096 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6097 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6098 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6099 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6100 6101 if (!CHIP_IS_E1x(bp)) 6102 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6103 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6104 6105 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6106 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6107 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6108 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6109 } 6110 6111 static void bnx2x_reset_common(struct bnx2x *bp) 6112 { 6113 u32 val = 0x1400; 6114 6115 /* reset_common */ 6116 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6117 0xd3ffff7f); 6118 6119 if (CHIP_IS_E3(bp)) { 6120 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6121 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6122 } 6123 6124 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6125 } 6126 6127 static void bnx2x_setup_dmae(struct bnx2x *bp) 6128 { 6129 bp->dmae_ready = 0; 6130 spin_lock_init(&bp->dmae_lock); 6131 } 6132 6133 static void bnx2x_init_pxp(struct bnx2x *bp) 6134 { 6135 u16 devctl; 6136 int r_order, w_order; 6137 6138 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6139 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6140 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6141 if (bp->mrrs == -1) 6142 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6143 else { 6144 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6145 r_order = bp->mrrs; 6146 } 6147 6148 bnx2x_init_pxp_arb(bp, r_order, w_order); 6149 } 6150 6151 static void bnx2x_setup_fan_failure_detection(struct bnx2x 
*bp) 6152 { 6153 int is_required; 6154 u32 val; 6155 int port; 6156 6157 if (BP_NOMCP(bp)) 6158 return; 6159 6160 is_required = 0; 6161 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6162 SHARED_HW_CFG_FAN_FAILURE_MASK; 6163 6164 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) 6165 is_required = 1; 6166 6167 /* 6168 * The fan failure mechanism is usually related to the PHY type since 6169 * the power consumption of the board is affected by the PHY. Currently, 6170 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 6171 */ 6172 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 6173 for (port = PORT_0; port < PORT_MAX; port++) { 6174 is_required |= 6175 bnx2x_fan_failure_det_req( 6176 bp, 6177 bp->common.shmem_base, 6178 bp->common.shmem2_base, 6179 port); 6180 } 6181 6182 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 6183 6184 if (is_required == 0) 6185 return; 6186 6187 /* Fan failure is indicated by SPIO 5 */ 6188 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, 6189 MISC_REGISTERS_SPIO_INPUT_HI_Z); 6190 6191 /* set to active low mode */ 6192 val = REG_RD(bp, MISC_REG_SPIO_INT); 6193 val |= ((1 << MISC_REGISTERS_SPIO_5) << 6194 MISC_REGISTERS_SPIO_INT_OLD_SET_POS); 6195 REG_WR(bp, MISC_REG_SPIO_INT, val); 6196 6197 /* enable interrupt to signal the IGU */ 6198 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6199 val |= (1 << MISC_REGISTERS_SPIO_5); 6200 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6201 } 6202 6203 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) 6204 { 6205 u32 offset = 0; 6206 6207 if (CHIP_IS_E1(bp)) 6208 return; 6209 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) 6210 return; 6211 6212 switch (BP_ABS_FUNC(bp)) { 6213 case 0: 6214 offset = PXP2_REG_PGL_PRETEND_FUNC_F0; 6215 break; 6216 case 1: 6217 offset = PXP2_REG_PGL_PRETEND_FUNC_F1; 6218 break; 6219 case 2: 6220 offset = PXP2_REG_PGL_PRETEND_FUNC_F2; 6221 break; 6222 case 3: 6223 offset = PXP2_REG_PGL_PRETEND_FUNC_F3; 6224 break; 6225 case 4: 6226 offset = PXP2_REG_PGL_PRETEND_FUNC_F4; 6227 break; 6228 case 5: 6229 offset = PXP2_REG_PGL_PRETEND_FUNC_F5; 6230 break; 6231 case 6: 6232 offset = PXP2_REG_PGL_PRETEND_FUNC_F6; 6233 break; 6234 case 7: 6235 offset = PXP2_REG_PGL_PRETEND_FUNC_F7; 6236 break; 6237 default: 6238 return; 6239 } 6240 6241 REG_WR(bp, offset, pretend_func_num); 6242 REG_RD(bp, offset); 6243 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); 6244 } 6245 6246 void bnx2x_pf_disable(struct bnx2x *bp) 6247 { 6248 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 6249 val &= ~IGU_PF_CONF_FUNC_EN; 6250 6251 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 6252 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6253 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6254 } 6255 6256 static void bnx2x__common_init_phy(struct bnx2x *bp) 6257 { 6258 u32 shmem_base[2], shmem2_base[2]; 6259 shmem_base[0] = bp->common.shmem_base; 6260 shmem2_base[0] = bp->common.shmem2_base; 6261 if (!CHIP_IS_E1x(bp)) { 6262 shmem_base[1] = 6263 SHMEM2_RD(bp, other_shmem_base_addr); 6264 shmem2_base[1] = 6265 SHMEM2_RD(bp, other_shmem2_base_addr); 6266 } 6267 bnx2x_acquire_phy_lock(bp); 6268 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, 6269 bp->common.chip_id); 6270 bnx2x_release_phy_lock(bp); 6271 } 6272 6273 /** 6274 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 
6275 * 6276 * @bp: driver handle 6277 */ 6278 static int bnx2x_init_hw_common(struct bnx2x *bp) 6279 { 6280 u32 val; 6281 6282 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); 6283 6284 /* 6285 * take the UNDI lock to protect undi_unload flow from accessing 6286 * registers while we're resetting the chip 6287 */ 6288 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 6289 6290 bnx2x_reset_common(bp); 6291 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 6292 6293 val = 0xfffc; 6294 if (CHIP_IS_E3(bp)) { 6295 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6296 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6297 } 6298 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 6299 6300 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 6301 6302 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 6303 6304 if (!CHIP_IS_E1x(bp)) { 6305 u8 abs_func_id; 6306 6307 /** 6308 * 4-port mode or 2-port mode we need to turn of master-enable 6309 * for everyone, after that, turn it back on for self. 6310 * so, we disregard multi-function or not, and always disable 6311 * for all functions on the given path, this means 0,2,4,6 for 6312 * path 0 and 1,3,5,7 for path 1 6313 */ 6314 for (abs_func_id = BP_PATH(bp); 6315 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { 6316 if (abs_func_id == BP_ABS_FUNC(bp)) { 6317 REG_WR(bp, 6318 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 6319 1); 6320 continue; 6321 } 6322 6323 bnx2x_pretend_func(bp, abs_func_id); 6324 /* clear pf enable */ 6325 bnx2x_pf_disable(bp); 6326 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 6327 } 6328 } 6329 6330 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); 6331 if (CHIP_IS_E1(bp)) { 6332 /* enable HW interrupt from PXP on USDM overflow 6333 bit 16 on INT_MASK_0 */ 6334 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6335 } 6336 6337 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); 6338 bnx2x_init_pxp(bp); 6339 6340 #ifdef __BIG_ENDIAN 6341 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); 6342 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); 6343 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 6344 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 6345 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 6346 /* make sure this value is 0 */ 6347 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); 6348 6349 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ 6350 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); 6351 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); 6352 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); 6353 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 6354 #endif 6355 6356 bnx2x_ilt_init_page_size(bp, INITOP_SET); 6357 6358 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 6359 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 6360 6361 /* let the HW do it's magic ... */ 6362 msleep(100); 6363 /* finish PXP init */ 6364 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); 6365 if (val != 1) { 6366 BNX2X_ERR("PXP2 CFG failed\n"); 6367 return -EBUSY; 6368 } 6369 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); 6370 if (val != 1) { 6371 BNX2X_ERR("PXP2 RD_INIT failed\n"); 6372 return -EBUSY; 6373 } 6374 6375 /* Timers bug workaround E2 only. We need to set the entire ILT to 6376 * have entries with value "0" and valid bit on. 6377 * This needs to be done by the first PF that is loaded in a path 6378 * (i.e. common phase) 6379 */ 6380 if (!CHIP_IS_E1x(bp)) { 6381 /* In E2 there is a bug in the timers block that can cause function 6 / 7 6382 * (i.e. vnic3) to start even if it is marked as "scan-off". 6383 * This occurs when a different function (func2,3) is being marked 6384 * as "scan-off". 
Real-life scenario for example: if a driver is being 6385 * load-unloaded while func6,7 are down. This will cause the timer to access 6386 * the ilt, translate to a logical address and send a request to read/write. 6387 * Since the ilt for the function that is down is not valid, this will cause 6388 * a translation error which is unrecoverable. 6389 * The workaround is intended to make sure that when this happens nothing fatal 6390 * will occur. The workaround: 6391 * 1. First PF driver which loads on a path will: 6392 * a. After taking the chip out of reset, by using pretend, 6393 * it will write "0" to the following registers of 6394 * the other vnics. 6395 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6396 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0); 6397 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0); 6398 * And for itself it will write '1' to 6399 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 6400 * dmae-operations (writing to pram, for example). 6401 * note: this could be done for functions 6,7 only, but doing it for 6402 * all is cleaner. 6403 * b. Write zero+valid to the entire ILT. 6404 * c. Init the first_timers_ilt_entry and last_timers_ilt_entry of 6405 * VNIC3 (of that port). The range allocated will be the 6406 * entire ILT. This is needed to prevent an ILT range error. 6407 * 2. Any PF driver load flow: 6408 * a. ILT update with the physical addresses of the allocated 6409 * logical pages. 6410 * b. Wait 20msec. - note that this timeout is needed to make 6411 * sure there are no requests in one of the PXP internal 6412 * queues with "old" ILT addresses. 6413 * c. PF enable in the PGLC. 6414 * d. Clear the was_error of the PF in the PGLC. (could have 6415 * occurred while the driver was down) 6416 * e. PF enable in the CFC (WEAK + STRONG) 6417 * f. Timers scan enable 6418 * 3. PF driver unload flow: 6419 * a. Clear the Timers scan_en. 6420 * b. Poll for scan_on=0 for that PF. 6421 * c. Clear the PF enable bit in the PXP. 6422 * d. Clear the PF enable in the CFC (WEAK + STRONG) 6423 * e. Write zero+valid to all ILT entries (the valid bit must 6424 * stay set) 6425 * f. If this is VNIC 3 of a port then also init 6426 * first_timers_ilt_entry to zero and last_timers_ilt_entry 6427 * to the last entry in the ILT. 6428 * 6429 * Notes: 6430 * Currently the PF error in the PGLC is non-recoverable. 6431 * In the future there will be a recovery routine for this error. 6432 * Currently attention is masked. 6433 * Having an MCP lock on the load/unload process does not guarantee that 6434 * there is no Timer disable during Func6/7 enable. This is because the 6435 * Timers scan is currently being cleared by the MCP on FLR. 6436 * Step 2.d can be done only for PF6/7, and the driver could also check 6437 * whether there is an error before clearing it. But the flow above is 6438 * simpler and more general. 6439 * All ILT entries are written with zero+valid and not just the PF6/7 6440 * ILT entries, since in the future the ILT entry allocation for 6441 * PF-s might be dynamic.
6442 */ 6443 struct ilt_client_info ilt_cli; 6444 struct bnx2x_ilt ilt; 6445 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 6446 memset(&ilt, 0, sizeof(struct bnx2x_ilt)); 6447 6448 /* initialize dummy TM client */ 6449 ilt_cli.start = 0; 6450 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 6451 ilt_cli.client_num = ILT_CLIENT_TM; 6452 6453 /* Step 1: set zeroes to all ilt page entries with valid bit on 6454 * Step 2: set the timers first/last ilt entry to point 6455 * to the entire range to prevent ILT range error for 3rd/4th 6456 * vnic (this code assumes existance of the vnic) 6457 * 6458 * both steps performed by call to bnx2x_ilt_client_init_op() 6459 * with dummy TM client 6460 * 6461 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 6462 * and his brother are split registers 6463 */ 6464 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); 6465 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); 6466 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 6467 6468 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 6469 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 6470 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 6471 } 6472 6473 6474 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 6475 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 6476 6477 if (!CHIP_IS_E1x(bp)) { 6478 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : 6479 (CHIP_REV_IS_FPGA(bp) ? 400 : 0); 6480 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); 6481 6482 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); 6483 6484 /* let the HW do it's magic ... */ 6485 do { 6486 msleep(200); 6487 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); 6488 } while (factor-- && (val != 1)); 6489 6490 if (val != 1) { 6491 BNX2X_ERR("ATC_INIT failed\n"); 6492 return -EBUSY; 6493 } 6494 } 6495 6496 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); 6497 6498 /* clean the DMAE memory */ 6499 bp->dmae_ready = 1; 6500 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); 6501 6502 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); 6503 6504 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); 6505 6506 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); 6507 6508 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); 6509 6510 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); 6511 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); 6512 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); 6513 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 6514 6515 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 6516 6517 6518 /* QM queues pointers table */ 6519 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 6520 6521 /* soft reset pulse */ 6522 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6523 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6524 6525 #ifdef BCM_CNIC 6526 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6527 #endif 6528 6529 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6530 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 6531 if (!CHIP_REV_IS_SLOW(bp)) 6532 /* enable hw interrupt from doorbell Q */ 6533 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6534 6535 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6536 6537 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6538 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 6539 6540 if (!CHIP_IS_E1(bp)) 6541 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 6542 6543 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { 6544 if (IS_MF_AFEX(bp)) { 6545 /* configure that VNTag and VLAN headers must be 6546 * received in afex mode 6547 */ 6548 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); 6549 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); 6550 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 6551 REG_WR(bp, 
PRS_REG_TAG_ETHERTYPE_0, 0x8926); 6552 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 6553 } else { 6554 /* Bit-map indicating which L2 hdrs may appear 6555 * after the basic Ethernet header 6556 */ 6557 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 6558 bp->path_has_ovlan ? 7 : 6); 6559 } 6560 } 6561 6562 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 6563 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 6564 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 6565 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 6566 6567 if (!CHIP_IS_E1x(bp)) { 6568 /* reset VFC memories */ 6569 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 6570 VFC_MEMORIES_RST_REG_CAM_RST | 6571 VFC_MEMORIES_RST_REG_RAM_RST); 6572 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 6573 VFC_MEMORIES_RST_REG_CAM_RST | 6574 VFC_MEMORIES_RST_REG_RAM_RST); 6575 6576 msleep(20); 6577 } 6578 6579 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 6580 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 6581 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 6582 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 6583 6584 /* sync semi rtc */ 6585 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6586 0x80000000); 6587 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 6588 0x80000000); 6589 6590 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 6591 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 6592 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 6593 6594 if (!CHIP_IS_E1x(bp)) { 6595 if (IS_MF_AFEX(bp)) { 6596 /* configure that VNTag and VLAN headers must be 6597 * sent in afex mode 6598 */ 6599 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 6600 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 6601 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 6602 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 6603 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 6604 } else { 6605 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 6606 bp->path_has_ovlan ? 
7 : 6); 6607 } 6608 } 6609 6610 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6611 6612 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 6613 6614 #ifdef BCM_CNIC 6615 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6616 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 6617 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 6618 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 6619 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 6620 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 6621 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 6622 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 6623 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 6624 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 6625 #endif 6626 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6627 6628 if (sizeof(union cdu_context) != 1024) 6629 /* we currently assume that a context is 1024 bytes */ 6630 dev_alert(&bp->pdev->dev, 6631 "please adjust the size of cdu_context(%ld)\n", 6632 (long)sizeof(union cdu_context)); 6633 6634 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 6635 val = (4 << 24) + (0 << 12) + 1024; 6636 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 6637 6638 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 6639 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 6640 /* enable context validation interrupt from CFC */ 6641 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6642 6643 /* set the thresholds to prevent CFC/CDU race */ 6644 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 6645 6646 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 6647 6648 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 6649 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 6650 6651 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 6652 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 6653 6654 /* Reset PCIE errors for debug */ 6655 REG_WR(bp, 0x2814, 0xffffffff); 6656 REG_WR(bp, 0x3820, 0xffffffff); 6657 6658 if (!CHIP_IS_E1x(bp)) { 6659 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 6660 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 6661 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 6662 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 6663 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 6664 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 6665 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 6666 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 6667 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 6668 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 6669 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 6670 } 6671 6672 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); 6673 if (!CHIP_IS_E1(bp)) { 6674 /* in E3 this done in per-port section */ 6675 if (!CHIP_IS_E3(bp)) 6676 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 6677 } 6678 if (CHIP_IS_E1H(bp)) 6679 /* not applicable for E2 (and above ...) 
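 * - on E2 and newer chips the per-port LLH classification type is set
 * via NIG_REG_LLH0_CLS_TYPE / NIG_REG_LLH1_CLS_TYPE in
 * bnx2x_init_hw_port() instead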
*/ 6680 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); 6681 6682 if (CHIP_REV_IS_SLOW(bp)) 6683 msleep(200); 6684 6685 /* finish CFC init */ 6686 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); 6687 if (val != 1) { 6688 BNX2X_ERR("CFC LL_INIT failed\n"); 6689 return -EBUSY; 6690 } 6691 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); 6692 if (val != 1) { 6693 BNX2X_ERR("CFC AC_INIT failed\n"); 6694 return -EBUSY; 6695 } 6696 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 6697 if (val != 1) { 6698 BNX2X_ERR("CFC CAM_INIT failed\n"); 6699 return -EBUSY; 6700 } 6701 REG_WR(bp, CFC_REG_DEBUG0, 0); 6702 6703 if (CHIP_IS_E1(bp)) { 6704 /* read NIG statistic 6705 to see if this is our first up since powerup */ 6706 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6707 val = *bnx2x_sp(bp, wb_data[0]); 6708 6709 /* do internal memory self test */ 6710 if ((val == 0) && bnx2x_int_mem_test(bp)) { 6711 BNX2X_ERR("internal mem self test failed\n"); 6712 return -EBUSY; 6713 } 6714 } 6715 6716 bnx2x_setup_fan_failure_detection(bp); 6717 6718 /* clear PXP2 attentions */ 6719 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 6720 6721 bnx2x_enable_blocks_attention(bp); 6722 bnx2x_enable_blocks_parity(bp); 6723 6724 if (!BP_NOMCP(bp)) { 6725 if (CHIP_IS_E1x(bp)) 6726 bnx2x__common_init_phy(bp); 6727 } else 6728 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 6729 6730 return 0; 6731 } 6732 6733 /** 6734 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 6735 * 6736 * @bp: driver handle 6737 */ 6738 static int bnx2x_init_hw_common_chip(struct bnx2x *bp) 6739 { 6740 int rc = bnx2x_init_hw_common(bp); 6741 6742 if (rc) 6743 return rc; 6744 6745 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 6746 if (!BP_NOMCP(bp)) 6747 bnx2x__common_init_phy(bp); 6748 6749 return 0; 6750 } 6751 6752 static int bnx2x_init_hw_port(struct bnx2x *bp) 6753 { 6754 int port = BP_PORT(bp); 6755 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 6756 u32 low, high; 6757 u32 val; 6758 6759 6760 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 6761 6762 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 6763 6764 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 6765 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 6766 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 6767 6768 /* Timers bug workaround: disables the pf_master bit in pglue at 6769 * common phase, we need to enable it here before any dmae access are 6770 * attempted. 
Therefore we manually added the enable-master to the 6771 * port phase (it also happens in the function phase) 6772 */ 6773 if (!CHIP_IS_E1x(bp)) 6774 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 6775 6776 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 6777 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 6778 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 6779 bnx2x_init_block(bp, BLOCK_QM, init_phase); 6780 6781 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 6782 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 6783 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 6784 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 6785 6786 /* QM cid (connection) count */ 6787 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 6788 6789 #ifdef BCM_CNIC 6790 bnx2x_init_block(bp, BLOCK_TM, init_phase); 6791 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6792 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6793 #endif 6794 6795 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 6796 6797 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 6798 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 6799 6800 if (IS_MF(bp)) 6801 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 6802 else if (bp->dev->mtu > 4096) { 6803 if (bp->flags & ONE_PORT_FLAG) 6804 low = 160; 6805 else { 6806 val = bp->dev->mtu; 6807 /* (24*1024 + val*4)/256 */ 6808 low = 96 + (val/64) + 6809 ((val % 64) ? 1 : 0); 6810 } 6811 } else 6812 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 6813 high = low + 56; /* 14*1024/256 */ 6814 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 6815 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 6816 } 6817 6818 if (CHIP_MODE_IS_4_PORT(bp)) 6819 REG_WR(bp, (BP_PORT(bp) ? 6820 BRB1_REG_MAC_GUARANTIED_1 : 6821 BRB1_REG_MAC_GUARANTIED_0), 40); 6822 6823 6824 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 6825 if (CHIP_IS_E3B0(bp)) { 6826 if (IS_MF_AFEX(bp)) { 6827 /* configure headers for AFEX mode */ 6828 REG_WR(bp, BP_PORT(bp) ? 6829 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6830 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 6831 REG_WR(bp, BP_PORT(bp) ? 6832 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 6833 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 6834 REG_WR(bp, BP_PORT(bp) ? 6835 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 6836 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 6837 } else { 6838 /* Ovlan exists only if we are in multi-function + 6839 * switch-dependent mode, in switch-independent there 6840 * is no ovlan headers 6841 */ 6842 REG_WR(bp, BP_PORT(bp) ? 6843 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6844 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 6845 (bp->path_has_ovlan ? 
7 : 6)); 6846 } 6847 } 6848 6849 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 6850 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 6851 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 6852 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 6853 6854 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 6855 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 6856 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 6857 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 6858 6859 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 6860 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 6861 6862 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 6863 6864 if (CHIP_IS_E1x(bp)) { 6865 /* configure PBF to work without PAUSE mtu 9000 */ 6866 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 6867 6868 /* update threshold */ 6869 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 6870 /* update init credit */ 6871 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 6872 6873 /* probe changes */ 6874 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 6875 udelay(50); 6876 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 6877 } 6878 6879 #ifdef BCM_CNIC 6880 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 6881 #endif 6882 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 6883 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 6884 6885 if (CHIP_IS_E1(bp)) { 6886 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 6887 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 6888 } 6889 bnx2x_init_block(bp, BLOCK_HC, init_phase); 6890 6891 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 6892 6893 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 6894 /* init aeu_mask_attn_func_0/1: 6895 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 6896 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 6897 * bits 4-7 are used for "per vn group attention" */ 6898 val = IS_MF(bp) ? 0xF7 : 0x7; 6899 /* Enable DCBX attention for all but E1 */ 6900 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 6901 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 6902 6903 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 6904 6905 if (!CHIP_IS_E1x(bp)) { 6906 /* Bit-map indicating which L2 hdrs may appear after the 6907 * basic Ethernet header 6908 */ 6909 if (IS_MF_AFEX(bp)) 6910 REG_WR(bp, BP_PORT(bp) ? 6911 NIG_REG_P1_HDRS_AFTER_BASIC : 6912 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 6913 else 6914 REG_WR(bp, BP_PORT(bp) ? 6915 NIG_REG_P1_HDRS_AFTER_BASIC : 6916 NIG_REG_P0_HDRS_AFTER_BASIC, 6917 IS_MF_SD(bp) ? 7 : 6); 6918 6919 if (CHIP_IS_E3(bp)) 6920 REG_WR(bp, BP_PORT(bp) ? 6921 NIG_REG_LLH1_MF_MODE : 6922 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 6923 } 6924 if (!CHIP_IS_E3(bp)) 6925 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 6926 6927 if (!CHIP_IS_E1(bp)) { 6928 /* 0x2 disable mf_ov, 0x1 enable */ 6929 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 6930 (IS_MF_SD(bp) ? 0x1 : 0x2)); 6931 6932 if (!CHIP_IS_E1x(bp)) { 6933 val = 0; 6934 switch (bp->mf_mode) { 6935 case MULTI_FUNCTION_SD: 6936 val = 1; 6937 break; 6938 case MULTI_FUNCTION_SI: 6939 case MULTI_FUNCTION_AFEX: 6940 val = 2; 6941 break; 6942 } 6943 6944 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE : 6945 NIG_REG_LLH0_CLS_TYPE), val); 6946 } 6947 { 6948 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 6949 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 6950 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 6951 } 6952 } 6953 6954 6955 /* If SPIO5 is set to generate interrupts, enable it for this port */ 6956 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6957 if (val & (1 << MISC_REGISTERS_SPIO_5)) { 6958 u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 6959 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 6960 val = REG_RD(bp, reg_addr); 6961 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 6962 REG_WR(bp, reg_addr, val); 6963 } 6964 6965 return 0; 6966 } 6967 6968 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6969 { 6970 int reg; 6971 u32 wb_write[2]; 6972 6973 if (CHIP_IS_E1(bp)) 6974 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 6975 else 6976 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 6977 6978 wb_write[0] = ONCHIP_ADDR1(addr); 6979 wb_write[1] = ONCHIP_ADDR2(addr); 6980 REG_WR_DMAE(bp, reg, wb_write, 2); 6981 } 6982 6983 static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, 6984 u8 idu_sb_id, bool is_Pf) 6985 { 6986 u32 data, ctl, cnt = 100; 6987 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 6988 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 6989 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 6990 u32 sb_bit = 1 << (idu_sb_id%32); 6991 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 6992 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 6993 6994 /* Not supported in BC mode */ 6995 if (CHIP_INT_MODE_IS_BC(bp)) 6996 return; 6997 6998 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 6999 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 7000 IGU_REGULAR_CLEANUP_SET | 7001 IGU_REGULAR_BCLEANUP; 7002 7003 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 7004 func_encode << IGU_CTRL_REG_FID_SHIFT | 7005 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 7006 7007 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7008 data, igu_addr_data); 7009 REG_WR(bp, igu_addr_data, data); 7010 mmiowb(); 7011 barrier(); 7012 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7013 ctl, igu_addr_ctl); 7014 REG_WR(bp, igu_addr_ctl, ctl); 7015 mmiowb(); 7016 barrier(); 7017 7018 /* wait for clean up to finish */ 7019 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7020 msleep(20); 7021 7022 7023 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7024 DP(NETIF_MSG_HW, 7025 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7026 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 7027 } 7028 } 7029 7030 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 7031 { 7032 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7033 } 7034 7035 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7036 { 7037 u32 i, base = FUNC_ILT_BASE(func); 7038 for (i = base; i < base + ILT_PER_FUNC; i++) 7039 bnx2x_ilt_wr(bp, i, 0); 7040 } 7041 7042 static int bnx2x_init_hw_func(struct bnx2x *bp) 7043 { 7044 int port = BP_PORT(bp); 7045 int func = BP_FUNC(bp); 7046 int init_phase = PHASE_PF0 + func; 7047 struct bnx2x_ilt *ilt = BP_ILT(bp); 7048 u16 cdu_ilt_start; 7049 u32 addr, val; 7050 u32 main_mem_base, main_mem_size, main_mem_prty_clr; 7051 int i, main_mem_width, rc; 7052 7053 DP(NETIF_MSG_HW, "starting func init func %d\n", func); 7054 7055 /* FLR cleanup - hmmm */ 7056 if (!CHIP_IS_E1x(bp)) { 7057 rc = bnx2x_pf_flr_clnup(bp); 7058 if (rc) 7059 return rc; 7060 } 7061 7062 /* set MSI reconfigure capability */ 7063 if (bp->common.int_block == INT_BLOCK_HC) { 7064 addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 7065 val = REG_RD(bp, addr); 7066 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 7067 REG_WR(bp, addr, val); 7068 } 7069 7070 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7071 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7072 7073 ilt = BP_ILT(bp); 7074 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7075 7076 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7077 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; 7078 ilt->lines[cdu_ilt_start + i].page_mapping = 7079 bp->context[i].cxt_mapping; 7080 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; 7081 } 7082 bnx2x_ilt_init_op(bp, INITOP_SET); 7083 7084 #ifdef BCM_CNIC 7085 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7086 7087 /* T1 hash bits value determines the T1 number of entries */ 7088 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7089 #endif 7090 7091 #ifndef BCM_CNIC 7092 /* set NIC mode */ 7093 REG_WR(bp, PRS_REG_NIC_MODE, 1); 7094 #endif /* BCM_CNIC */ 7095 7096 if (!CHIP_IS_E1x(bp)) { 7097 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 7098 7099 /* Turn on a single ISR mode in IGU if driver is going to use 7100 * INT#x or MSI 7101 */ 7102 if (!(bp->flags & USING_MSIX_FLAG)) 7103 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 7104 /* 7105 * Timers workaround bug: function init part. 7106 * Need to wait 20msec after initializing ILT, 7107 * needed to make sure there are no requests in 7108 * one of the PXP internal queues with "old" ILT addresses 7109 */ 7110 msleep(20); 7111 /* 7112 * Master enable - Due to WB DMAE writes performed before this 7113 * register is re-initialized as part of the regular function 7114 * init 7115 */ 7116 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7117 /* Enable the function in IGU */ 7118 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); 7119 } 7120 7121 bp->dmae_ready = 1; 7122 7123 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7124 7125 if (!CHIP_IS_E1x(bp)) 7126 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 7127 7128 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7129 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7130 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7131 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7132 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7133 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7134 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7135 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7136 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7137 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7138 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7139 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7140 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7141 7142 if (!CHIP_IS_E1x(bp)) 7143 REG_WR(bp, QM_REG_PF_EN, 1); 7144 7145 if (!CHIP_IS_E1x(bp)) { 7146 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7147 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7148 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7149 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 7150 } 7151 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7152 7153 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7154 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7155 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7156 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7157 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7158 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7159 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7160 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7161 bnx2x_init_block(bp, BLOCK_UPB, 
init_phase); 7162 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7163 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7164 if (!CHIP_IS_E1x(bp)) 7165 REG_WR(bp, PBF_REG_DISABLE_PF, 0); 7166 7167 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7168 7169 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7170 7171 if (!CHIP_IS_E1x(bp)) 7172 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); 7173 7174 if (IS_MF(bp)) { 7175 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7176 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); 7177 } 7178 7179 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7180 7181 /* HC init per function */ 7182 if (bp->common.int_block == INT_BLOCK_HC) { 7183 if (CHIP_IS_E1H(bp)) { 7184 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7185 7186 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7187 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7188 } 7189 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7190 7191 } else { 7192 int num_segs, sb_idx, prod_offset; 7193 7194 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 7195 7196 if (!CHIP_IS_E1x(bp)) { 7197 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 7198 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 7199 } 7200 7201 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7202 7203 if (!CHIP_IS_E1x(bp)) { 7204 int dsb_idx = 0; 7205 /** 7206 * Producer memory: 7207 * E2 mode: address 0-135 match to the mapping memory; 7208 * 136 - PF0 default prod; 137 - PF1 default prod; 7209 * 138 - PF2 default prod; 139 - PF3 default prod; 7210 * 140 - PF0 attn prod; 141 - PF1 attn prod; 7211 * 142 - PF2 attn prod; 143 - PF3 attn prod; 7212 * 144-147 reserved. 7213 * 7214 * E1.5 mode - In backward compatible mode; 7215 * for non default SB; each even line in the memory 7216 * holds the U producer and each odd line hold 7217 * the C producer. The first 128 producers are for 7218 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 7219 * producers are for the DSB for each PF. 7220 * Each PF has five segments: (the order inside each 7221 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 7222 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 7223 * 144-147 attn prods; 7224 */ 7225 /* non-default-status-blocks */ 7226 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7227 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 7228 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { 7229 prod_offset = (bp->igu_base_sb + sb_idx) * 7230 num_segs; 7231 7232 for (i = 0; i < num_segs; i++) { 7233 addr = IGU_REG_PROD_CONS_MEMORY + 7234 (prod_offset + i) * 4; 7235 REG_WR(bp, addr, 0); 7236 } 7237 /* send consumer update with value 0 */ 7238 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, 7239 USTORM_ID, 0, IGU_INT_NOP, 1); 7240 bnx2x_igu_clear_sb(bp, 7241 bp->igu_base_sb + sb_idx); 7242 } 7243 7244 /* default-status-blocks */ 7245 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 7246 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 7247 7248 if (CHIP_MODE_IS_4_PORT(bp)) 7249 dsb_idx = BP_FUNC(bp); 7250 else 7251 dsb_idx = BP_VN(bp); 7252 7253 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 
7254 IGU_BC_BASE_DSB_PROD + dsb_idx : 7255 IGU_NORM_BASE_DSB_PROD + dsb_idx); 7256 7257 /* 7258 * igu prods come in chunks of E1HVN_MAX (4) - 7259 * it does not matter what the current chip mode is 7260 */ 7261 for (i = 0; i < (num_segs * E1HVN_MAX); 7262 i += E1HVN_MAX) { 7263 addr = IGU_REG_PROD_CONS_MEMORY + 7264 (prod_offset + i)*4; 7265 REG_WR(bp, addr, 0); 7266 } 7267 /* send consumer update with 0 */ 7268 if (CHIP_INT_MODE_IS_BC(bp)) { 7269 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7270 USTORM_ID, 0, IGU_INT_NOP, 1); 7271 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7272 CSTORM_ID, 0, IGU_INT_NOP, 1); 7273 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7274 XSTORM_ID, 0, IGU_INT_NOP, 1); 7275 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7276 TSTORM_ID, 0, IGU_INT_NOP, 1); 7277 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7278 ATTENTION_ID, 0, IGU_INT_NOP, 1); 7279 } else { 7280 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7281 USTORM_ID, 0, IGU_INT_NOP, 1); 7282 bnx2x_ack_sb(bp, bp->igu_dsb_id, 7283 ATTENTION_ID, 0, IGU_INT_NOP, 1); 7284 } 7285 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); 7286 7287 /* !!! these should become driver const once 7288 rf-tool supports split-68 const */ 7289 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 7290 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 7291 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 7292 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 7293 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 7294 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 7295 } 7296 } 7297 7298 /* Reset PCIE errors for debug */ 7299 REG_WR(bp, 0x2114, 0xffffffff); 7300 REG_WR(bp, 0x2120, 0xffffffff); 7301 7302 if (CHIP_IS_E1x(bp)) { 7303 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 7304 main_mem_base = HC_REG_MAIN_MEMORY + 7305 BP_PORT(bp) * (main_mem_size * 4); 7306 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 7307 main_mem_width = 8; 7308 7309 val = REG_RD(bp, main_mem_prty_clr); 7310 if (val) 7311 DP(NETIF_MSG_HW, 7312 "Hmmm... 
Parity errors in HC block during function init (0x%x)!\n", 7313 val); 7314 7315 /* Clear "false" parity errors in MSI-X table */ 7316 for (i = main_mem_base; 7317 i < main_mem_base + main_mem_size * 4; 7318 i += main_mem_width) { 7319 bnx2x_read_dmae(bp, i, main_mem_width / 4); 7320 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), 7321 i, main_mem_width / 4); 7322 } 7323 /* Clear HC parity attention */ 7324 REG_RD(bp, main_mem_prty_clr); 7325 } 7326 7327 #ifdef BNX2X_STOP_ON_ERROR 7328 /* Enable STORMs SP logging */ 7329 REG_WR8(bp, BAR_USTRORM_INTMEM + 7330 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7331 REG_WR8(bp, BAR_TSTRORM_INTMEM + 7332 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7333 REG_WR8(bp, BAR_CSTRORM_INTMEM + 7334 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7335 REG_WR8(bp, BAR_XSTRORM_INTMEM + 7336 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 7337 #endif 7338 7339 bnx2x_phy_probe(&bp->link_params); 7340 7341 return 0; 7342 } 7343 7344 7345 void bnx2x_free_mem(struct bnx2x *bp) 7346 { 7347 int i; 7348 7349 /* fastpath */ 7350 bnx2x_free_fp_mem(bp); 7351 /* end of fastpath */ 7352 7353 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 7354 sizeof(struct host_sp_status_block)); 7355 7356 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 7357 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7358 7359 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 7360 sizeof(struct bnx2x_slowpath)); 7361 7362 for (i = 0; i < L2_ILT_LINES(bp); i++) 7363 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, 7364 bp->context[i].size); 7365 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 7366 7367 BNX2X_FREE(bp->ilt->lines); 7368 7369 #ifdef BCM_CNIC 7370 if (!CHIP_IS_E1x(bp)) 7371 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, 7372 sizeof(struct host_hc_status_block_e2)); 7373 else 7374 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 7375 sizeof(struct host_hc_status_block_e1x)); 7376 7377 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 7378 #endif 7379 7380 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 7381 7382 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 7383 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7384 } 7385 7386 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) 7387 { 7388 int num_groups; 7389 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; 7390 7391 /* number of queues for statistics is number of eth queues + FCoE */ 7392 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; 7393 7394 /* Total number of FW statistics requests = 7395 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats + 7396 * num of queues 7397 */ 7398 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; 7399 7400 7401 /* Request is built from stats_query_header and an array of 7402 * stats_query_cmd_group each of which contains 7403 * STATS_QUERY_CMD_COUNT rules. The real number of requests is 7404 * configured in the stats_query_header. 7405 */ 7406 num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) + 7407 (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0); 7408 7409 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + 7410 num_groups * sizeof(struct stats_query_cmd_group); 7411 7412 /* Data for statistics requests + stats_counter 7413 * 7414 * stats_counter holds per-STORM counters that are incremented 7415 * when a STORM has finished with the current request. 7416 * 7417 * Memory for FCoE offloaded statistics is counted anyway, 7418 * even if it will not be sent.
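 *
 * Worked example of the sizing above (assuming STATS_QUERY_CMD_COUNT
 * is 16, as defined in the HSI): with 8 ETH queues plus FCoE,
 * num_queue_stats is 9 and fw_stats_num = 2 + 1 + 9 = 12, so
 * num_groups = 12/16 + 1 = 1 and a single stats_query_cmd_group
 * suffices; a 17th request would start a second group.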
7419 */ 7420 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + 7421 sizeof(struct per_pf_stats) + 7422 sizeof(struct fcoe_statistics_params) + 7423 sizeof(struct per_queue_stats) * num_queue_stats + 7424 sizeof(struct stats_counter); 7425 7426 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, 7427 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7428 7429 /* Set shortcuts */ 7430 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; 7431 bp->fw_stats_req_mapping = bp->fw_stats_mapping; 7432 7433 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) 7434 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); 7435 7436 bp->fw_stats_data_mapping = bp->fw_stats_mapping + 7437 bp->fw_stats_req_sz; 7438 return 0; 7439 7440 alloc_mem_err: 7441 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 7442 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7443 BNX2X_ERR("Can't allocate memory\n"); 7444 return -ENOMEM; 7445 } 7446 7447 7448 int bnx2x_alloc_mem(struct bnx2x *bp) 7449 { 7450 int i, allocated, context_size; 7451 7452 #ifdef BCM_CNIC 7453 if (!CHIP_IS_E1x(bp)) 7454 /* size = the status block + ramrod buffers */ 7455 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 7456 sizeof(struct host_hc_status_block_e2)); 7457 else 7458 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, 7459 sizeof(struct host_hc_status_block_e1x)); 7460 7461 /* allocate searcher T2 table */ 7462 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7463 #endif 7464 7465 7466 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 7467 sizeof(struct host_sp_status_block)); 7468 7469 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 7470 sizeof(struct bnx2x_slowpath)); 7471 7472 #ifdef BCM_CNIC 7473 /* write address to which L5 should insert its values */ 7474 bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp; 7475 #endif 7476 7477 /* Allocated memory for FW statistics */ 7478 if (bnx2x_alloc_fw_stats_mem(bp)) 7479 goto alloc_mem_err; 7480 7481 /* Allocate memory for CDU context: 7482 * This memory is allocated separately and not in the generic ILT 7483 * functions because CDU differs in few aspects: 7484 * 1. There are multiple entities allocating memory for context - 7485 * 'regular' driver, CNIC and SRIOV driver. Each separately controls 7486 * its own ILT lines. 7487 * 2. Since CDU page-size is not a single 4KB page (which is the case 7488 * for the other ILT clients), to be efficient we want to support 7489 * allocation of sub-page-size in the last entry. 7490 * 3. Context pointers are used by the driver to pass to FW / update 7491 * the context (for the other ILT clients the pointers are used just to 7492 * free the memory during unload). 
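 *
 * Illustrative split (made-up numbers): if context_size equals
 * 2.5 * CDU_ILT_PAGE_SZ, the loop below produces three chunks of
 * CDU_ILT_PAGE_SZ, CDU_ILT_PAGE_SZ and 0.5 * CDU_ILT_PAGE_SZ, so
 * only the last entry is sub-page-size.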
7493 */ 7494 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 7495 7496 for (i = 0, allocated = 0; allocated < context_size; i++) { 7497 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 7498 (context_size - allocated)); 7499 BNX2X_PCI_ALLOC(bp->context[i].vcxt, 7500 &bp->context[i].cxt_mapping, 7501 bp->context[i].size); 7502 allocated += bp->context[i].size; 7503 } 7504 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 7505 7506 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 7507 goto alloc_mem_err; 7508 7509 /* Slow path ring */ 7510 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 7511 7512 /* EQ */ 7513 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 7514 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7515 7516 7517 /* fastpath */ 7518 /* need to be done at the end, since it's self adjusting to amount 7519 * of memory available for RSS queues 7520 */ 7521 if (bnx2x_alloc_fp_mem(bp)) 7522 goto alloc_mem_err; 7523 return 0; 7524 7525 alloc_mem_err: 7526 bnx2x_free_mem(bp); 7527 BNX2X_ERR("Can't allocate memory\n"); 7528 return -ENOMEM; 7529 } 7530 7531 /* 7532 * Init service functions 7533 */ 7534 7535 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 7536 struct bnx2x_vlan_mac_obj *obj, bool set, 7537 int mac_type, unsigned long *ramrod_flags) 7538 { 7539 int rc; 7540 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 7541 7542 memset(&ramrod_param, 0, sizeof(ramrod_param)); 7543 7544 /* Fill general parameters */ 7545 ramrod_param.vlan_mac_obj = obj; 7546 ramrod_param.ramrod_flags = *ramrod_flags; 7547 7548 /* Fill a user request section if needed */ 7549 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 7550 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 7551 7552 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 7553 7554 /* Set the command: ADD or DEL */ 7555 if (set) 7556 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 7557 else 7558 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 7559 } 7560 7561 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 7562 7563 if (rc == -EEXIST) { 7564 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 7565 /* do not treat adding same MAC as error */ 7566 rc = 0; 7567 } else if (rc < 0) 7568 BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); 7569 7570 return rc; 7571 } 7572 7573 int bnx2x_del_all_macs(struct bnx2x *bp, 7574 struct bnx2x_vlan_mac_obj *mac_obj, 7575 int mac_type, bool wait_for_comp) 7576 { 7577 int rc; 7578 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 7579 7580 /* Wait for completion of requested */ 7581 if (wait_for_comp) 7582 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7583 7584 /* Set the mac type of addresses we want to clear */ 7585 __set_bit(mac_type, &vlan_mac_flags); 7586 7587 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); 7588 if (rc < 0) 7589 BNX2X_ERR("Failed to delete MACs: %d\n", rc); 7590 7591 return rc; 7592 } 7593 7594 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 7595 { 7596 unsigned long ramrod_flags = 0; 7597 7598 #ifdef BCM_CNIC 7599 if (is_zero_ether_addr(bp->dev->dev_addr) && 7600 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 7601 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7602 "Ignoring Zero MAC for STORAGE SD mode\n"); 7603 return 0; 7604 } 7605 #endif 7606 7607 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 7608 7609 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7610 /* Eth MAC is set on RSS leading client (fp[0]) */ 7611 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj, 7612 set, BNX2X_ETH_MAC, &ramrod_flags); 7613 } 7614 7615 int bnx2x_setup_leading(struct bnx2x *bp) 7616 { 7617 return bnx2x_setup_queue(bp, &bp->fp[0], 1); 7618 } 7619 7620 /** 7621 * bnx2x_set_int_mode - configure interrupt mode 7622 * 7623 * @bp: driver handle 7624 * 7625 * In case of MSI-X it will also try to enable MSI-X. 7626 */ 7627 void bnx2x_set_int_mode(struct bnx2x *bp) 7628 { 7629 switch (int_mode) { 7630 case INT_MODE_MSI: 7631 bnx2x_enable_msi(bp); 7632 /* falling through... */ 7633 case INT_MODE_INTx: 7634 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7635 BNX2X_DEV_INFO("set number of queues to 1\n"); 7636 break; 7637 default: 7638 /* if we can't use MSI-X we only need one fp, 7639 * so try to enable MSI-X with the requested number of fp's 7640 * and fallback to MSI or legacy INTx with one fp 7641 */ 7642 if (bnx2x_enable_msix(bp) || 7643 bp->flags & USING_SINGLE_MSIX_FLAG) { 7644 /* failed to enable multiple MSI-X */ 7645 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 7646 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7647 7648 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7649 7650 /* Try to enable MSI */ 7651 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) && 7652 !(bp->flags & DISABLE_MSI_FLAG)) 7653 bnx2x_enable_msi(bp); 7654 } 7655 break; 7656 } 7657 } 7658 7659 /* must be called prioir to any HW initializations */ 7660 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) 7661 { 7662 return L2_ILT_LINES(bp); 7663 } 7664 7665 void bnx2x_ilt_set_info(struct bnx2x *bp) 7666 { 7667 struct ilt_client_info *ilt_client; 7668 struct bnx2x_ilt *ilt = BP_ILT(bp); 7669 u16 line = 0; 7670 7671 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); 7672 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); 7673 7674 /* CDU */ 7675 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 7676 ilt_client->client_num = ILT_CLIENT_CDU; 7677 ilt_client->page_size = CDU_ILT_PAGE_SZ; 7678 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 7679 ilt_client->start = line; 7680 line += bnx2x_cid_ilt_lines(bp); 7681 #ifdef BCM_CNIC 7682 line += CNIC_ILT_LINES; 7683 #endif 7684 ilt_client->end = line - 1; 7685 7686 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7687 ilt_client->start, 7688 ilt_client->end, 7689 
ilt_client->page_size,
7690 ilt_client->flags,
7691 ilog2(ilt_client->page_size >> 12));
7692
7693 /* QM */
7694 if (QM_INIT(bp->qm_cid_count)) {
7695 ilt_client = &ilt->clients[ILT_CLIENT_QM];
7696 ilt_client->client_num = ILT_CLIENT_QM;
7697 ilt_client->page_size = QM_ILT_PAGE_SZ;
7698 ilt_client->flags = 0;
7699 ilt_client->start = line;
7700
7701 /* 4 bytes for each cid */
7702 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
7703 QM_ILT_PAGE_SZ);
7704
7705 ilt_client->end = line - 1;
7706
7707 DP(NETIF_MSG_IFUP,
7708 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7709 ilt_client->start,
7710 ilt_client->end,
7711 ilt_client->page_size,
7712 ilt_client->flags,
7713 ilog2(ilt_client->page_size >> 12));
7714
7715 }
7716 /* SRC */
7717 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7718 #ifdef BCM_CNIC
7719 ilt_client->client_num = ILT_CLIENT_SRC;
7720 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7721 ilt_client->flags = 0;
7722 ilt_client->start = line;
7723 line += SRC_ILT_LINES;
7724 ilt_client->end = line - 1;
7725
7726 DP(NETIF_MSG_IFUP,
7727 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7728 ilt_client->start,
7729 ilt_client->end,
7730 ilt_client->page_size,
7731 ilt_client->flags,
7732 ilog2(ilt_client->page_size >> 12));
7733
7734 #else
7735 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
7736 #endif
7737
7738 /* TM */
7739 ilt_client = &ilt->clients[ILT_CLIENT_TM];
7740 #ifdef BCM_CNIC
7741 ilt_client->client_num = ILT_CLIENT_TM;
7742 ilt_client->page_size = TM_ILT_PAGE_SZ;
7743 ilt_client->flags = 0;
7744 ilt_client->start = line;
7745 line += TM_ILT_LINES;
7746 ilt_client->end = line - 1;
7747
7748 DP(NETIF_MSG_IFUP,
7749 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7750 ilt_client->start,
7751 ilt_client->end,
7752 ilt_client->page_size,
7753 ilt_client->flags,
7754 ilog2(ilt_client->page_size >> 12));
7755
7756 #else
7757 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
7758 #endif
7759 BUG_ON(line > ILT_MAX_LINES);
7760 }
7761
7762 /**
7763 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
7764 *
7765 * @bp: driver handle
7766 * @fp: pointer to fastpath
7767 * @init_params: pointer to parameters structure
7768 *
7769 * parameters configured:
7770 * - HC configuration
7771 * - Queue's CDU context
7772 */
7773 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7774 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
7775 {
7776
7777 u8 cos;
7778 int cxt_index, cxt_offset;
7779
7780 /* FCoE Queue uses Default SB, thus has no HC capabilities */
7781 if (!IS_FCOE_FP(fp)) {
7782 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
7783 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
7784
7785 /* If HC is supported, enable host coalescing in the transition
7786 * to INIT state.
7787 */
7788 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
7789 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
7790
7791 /* HC rate */
7792 init_params->rx.hc_rate = bp->rx_ticks ?
7793 (1000000 / bp->rx_ticks) : 0;
7794 init_params->tx.hc_rate = bp->tx_ticks ?
7795 (1000000 / bp->tx_ticks) : 0;
7796
7797 /* FW SB ID */
7798 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
7799 fp->fw_sb_id;
7800
7801 /*
7802 * CQ index among the SB indices: the FCoE client uses the default
7803 * SB, therefore it's different.
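 *
 * (Worked example for the HC rates set above, tick values assumed for
 * illustration: rx_ticks = 50 usec gives hc_rate = 1000000 / 50 =
 * 20000 status block updates per second; a zero tick value sets the
 * rate to 0, i.e. no coalescing timeout.)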
7804 */ 7805 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 7806 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 7807 } 7808 7809 /* set maximum number of COSs supported by this queue */ 7810 init_params->max_cos = fp->max_cos; 7811 7812 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", 7813 fp->index, init_params->max_cos); 7814 7815 /* set the context pointers queue object */ 7816 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 7817 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; 7818 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * 7819 ILT_PAGE_CIDS); 7820 init_params->cxts[cos] = 7821 &bp->context[cxt_index].vcxt[cxt_offset].eth; 7822 } 7823 } 7824 7825 int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7826 struct bnx2x_queue_state_params *q_params, 7827 struct bnx2x_queue_setup_tx_only_params *tx_only_params, 7828 int tx_index, bool leading) 7829 { 7830 memset(tx_only_params, 0, sizeof(*tx_only_params)); 7831 7832 /* Set the command */ 7833 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; 7834 7835 /* Set tx-only QUEUE flags: don't zero statistics */ 7836 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); 7837 7838 /* choose the index of the cid to send the slow path on */ 7839 tx_only_params->cid_index = tx_index; 7840 7841 /* Set general TX_ONLY_SETUP parameters */ 7842 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); 7843 7844 /* Set Tx TX_ONLY_SETUP parameters */ 7845 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); 7846 7847 DP(NETIF_MSG_IFUP, 7848 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", 7849 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], 7850 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, 7851 tx_only_params->gen_params.spcl_id, tx_only_params->flags); 7852 7853 /* send the ramrod */ 7854 return bnx2x_queue_state_change(bp, q_params); 7855 } 7856 7857 7858 /** 7859 * bnx2x_setup_queue - setup queue 7860 * 7861 * @bp: driver handle 7862 * @fp: pointer to fastpath 7863 * @leading: is leading 7864 * 7865 * This function performs 2 steps in a Queue state machine 7866 * actually: 1) RESET->INIT 2) INIT->SETUP 7867 */ 7868 7869 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7870 bool leading) 7871 { 7872 struct bnx2x_queue_state_params q_params = {NULL}; 7873 struct bnx2x_queue_setup_params *setup_params = 7874 &q_params.params.setup; 7875 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 7876 &q_params.params.tx_only; 7877 int rc; 7878 u8 tx_index; 7879 7880 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); 7881 7882 /* reset IGU state skip FCoE L2 queue */ 7883 if (!IS_FCOE_FP(fp)) 7884 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 7885 IGU_INT_ENABLE, 0); 7886 7887 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 7888 /* We want to wait for completion in this context */ 7889 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7890 7891 /* Prepare the INIT parameters */ 7892 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); 7893 7894 /* Set the command */ 7895 q_params.cmd = BNX2X_Q_CMD_INIT; 7896 7897 /* Change the state to INIT */ 7898 rc = bnx2x_queue_state_change(bp, &q_params); 7899 if (rc) { 7900 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); 7901 return rc; 7902 } 7903 7904 DP(NETIF_MSG_IFUP, "init complete\n"); 7905 7906 7907 /* Now move the Queue to the SETUP state... 
*/
7908 memset(setup_params, 0, sizeof(*setup_params));
7909
7910 /* Set QUEUE flags */
7911 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
7912
7913 /* Set general SETUP parameters */
7914 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
7915 FIRST_TX_COS_INDEX);
7916
7917 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
7918 &setup_params->rxq_params);
7919
7920 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
7921 FIRST_TX_COS_INDEX);
7922
7923 /* Set the command */
7924 q_params.cmd = BNX2X_Q_CMD_SETUP;
7925
7926 /* Change the state to SETUP */
7927 rc = bnx2x_queue_state_change(bp, &q_params);
7928 if (rc) {
7929 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
7930 return rc;
7931 }
7932
7933 /* loop through the relevant tx-only indices */
7934 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
7935 tx_index < fp->max_cos;
7936 tx_index++) {
7937
7938 /* prepare and send tx-only ramrod */
7939 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
7940 tx_only_params, tx_index, leading);
7941 if (rc) {
7942 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
7943 fp->index, tx_index);
7944 return rc;
7945 }
7946 }
7947
7948 return rc;
7949 }
7950
7951 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7952 {
7953 struct bnx2x_fastpath *fp = &bp->fp[index];
7954 struct bnx2x_fp_txdata *txdata;
7955 struct bnx2x_queue_state_params q_params = {NULL};
7956 int rc, tx_index;
7957
7958 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
7959
7960 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
7961 /* We want to wait for completion in this context */
7962 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7963
7964
7965 /* close tx-only connections */
7966 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
7967 tx_index < fp->max_cos;
7968 tx_index++) {
7969
7970 /* ascertain this is a normal queue */
7971 txdata = fp->txdata_ptr[tx_index];
7972
7973 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
7974 txdata->txq_index);
7975
7976 /* send the terminate ramrod on the tx-only connection */
7977 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
7978 memset(&q_params.params.terminate, 0,
7979 sizeof(q_params.params.terminate));
7980 q_params.params.terminate.cid_index = tx_index;
7981
7982 rc = bnx2x_queue_state_change(bp, &q_params);
7983 if (rc)
7984 return rc;
7985
7986 /* send the cfc del ramrod on the tx-only connection */
7987 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
7988 memset(&q_params.params.cfc_del, 0,
7989 sizeof(q_params.params.cfc_del));
7990 q_params.params.cfc_del.cid_index = tx_index;
7991 rc = bnx2x_queue_state_change(bp, &q_params);
7992 if (rc)
7993 return rc;
7994 }
7995 /* Stop the primary connection: */
7996 /* ...halt the connection */
7997 q_params.cmd = BNX2X_Q_CMD_HALT;
7998 rc = bnx2x_queue_state_change(bp, &q_params);
7999 if (rc)
8000 return rc;
8001
8002 /* ...terminate the connection */
8003 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8004 memset(&q_params.params.terminate, 0,
8005 sizeof(q_params.params.terminate));
8006 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8007 rc = bnx2x_queue_state_change(bp, &q_params);
8008 if (rc)
8009 return rc;
8010 /* ...delete cfc entry */
8011 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8012 memset(&q_params.params.cfc_del, 0,
8013 sizeof(q_params.params.cfc_del));
8014 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8015 return bnx2x_queue_state_change(bp, &q_params);
8016 }
8017
8018
8019 static void bnx2x_reset_func(struct bnx2x *bp)
8020 {
8021 int port = BP_PORT(bp);
8022 int func = BP_FUNC(bp);
8023 int i;
8024
8025 /*
Disable the function in the FW */ 8026 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8027 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8028 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8029 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 8030 8031 /* FP SBs */ 8032 for_each_eth_queue(bp, i) { 8033 struct bnx2x_fastpath *fp = &bp->fp[i]; 8034 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8035 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 8036 SB_DISABLED); 8037 } 8038 8039 #ifdef BCM_CNIC 8040 /* CNIC SB */ 8041 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8042 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), 8043 SB_DISABLED); 8044 #endif 8045 /* SP SB */ 8046 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8047 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8048 SB_DISABLED); 8049 8050 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 8051 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 8052 0); 8053 8054 /* Configure IGU */ 8055 if (bp->common.int_block == INT_BLOCK_HC) { 8056 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8057 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8058 } else { 8059 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8060 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8061 } 8062 8063 #ifdef BCM_CNIC 8064 /* Disable Timer scan */ 8065 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8066 /* 8067 * Wait for at least 10ms and up to 2 second for the timers scan to 8068 * complete 8069 */ 8070 for (i = 0; i < 200; i++) { 8071 msleep(10); 8072 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8073 break; 8074 } 8075 #endif 8076 /* Clear ILT */ 8077 bnx2x_clear_func_ilt(bp, func); 8078 8079 /* Timers workaround bug for E2: if this is vnic-3, 8080 * we need to set the entire ilt range for this timers. 8081 */ 8082 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 8083 struct ilt_client_info ilt_cli; 8084 /* use dummy TM client */ 8085 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 8086 ilt_cli.start = 0; 8087 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 8088 ilt_cli.client_num = ILT_CLIENT_TM; 8089 8090 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 8091 } 8092 8093 /* this assumes that reset_port() called before reset_func()*/ 8094 if (!CHIP_IS_E1x(bp)) 8095 bnx2x_pf_disable(bp); 8096 8097 bp->dmae_ready = 0; 8098 } 8099 8100 static void bnx2x_reset_port(struct bnx2x *bp) 8101 { 8102 int port = BP_PORT(bp); 8103 u32 val; 8104 8105 /* Reset physical Link */ 8106 bnx2x__link_reset(bp); 8107 8108 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 8109 8110 /* Do not rcv packets to BRB */ 8111 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 8112 /* Do not direct rcv packets that are not for MCP to the BRB */ 8113 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 8114 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 8115 8116 /* Configure AEU */ 8117 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 8118 8119 msleep(100); 8120 /* Check for BRB port occupancy */ 8121 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 8122 if (val) 8123 DP(NETIF_MSG_IFDOWN, 8124 "BRB1 is not empty %d blocks are occupied\n", val); 8125 8126 /* TODO: Close Doorbell port? 
*/ 8127 } 8128 8129 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 8130 { 8131 struct bnx2x_func_state_params func_params = {NULL}; 8132 8133 /* Prepare parameters for function state transitions */ 8134 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8135 8136 func_params.f_obj = &bp->func_obj; 8137 func_params.cmd = BNX2X_F_CMD_HW_RESET; 8138 8139 func_params.params.hw_init.load_phase = load_code; 8140 8141 return bnx2x_func_state_change(bp, &func_params); 8142 } 8143 8144 static int bnx2x_func_stop(struct bnx2x *bp) 8145 { 8146 struct bnx2x_func_state_params func_params = {NULL}; 8147 int rc; 8148 8149 /* Prepare parameters for function state transitions */ 8150 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 8151 func_params.f_obj = &bp->func_obj; 8152 func_params.cmd = BNX2X_F_CMD_STOP; 8153 8154 /* 8155 * Try to stop the function the 'good way'. If fails (in case 8156 * of a parity error during bnx2x_chip_cleanup()) and we are 8157 * not in a debug mode, perform a state transaction in order to 8158 * enable further HW_RESET transaction. 8159 */ 8160 rc = bnx2x_func_state_change(bp, &func_params); 8161 if (rc) { 8162 #ifdef BNX2X_STOP_ON_ERROR 8163 return rc; 8164 #else 8165 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); 8166 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 8167 return bnx2x_func_state_change(bp, &func_params); 8168 #endif 8169 } 8170 8171 return 0; 8172 } 8173 8174 /** 8175 * bnx2x_send_unload_req - request unload mode from the MCP. 8176 * 8177 * @bp: driver handle 8178 * @unload_mode: requested function's unload mode 8179 * 8180 * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 8181 */ 8182 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 8183 { 8184 u32 reset_code = 0; 8185 int port = BP_PORT(bp); 8186 8187 /* Select the UNLOAD request mode */ 8188 if (unload_mode == UNLOAD_NORMAL) 8189 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8190 8191 else if (bp->flags & NO_WOL_FLAG) 8192 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 8193 8194 else if (bp->wol) { 8195 u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0;
8196 u8 *mac_addr = bp->dev->dev_addr;
8197 u32 val;
8198 u16 pmc;
8199
8200 /* The mac address is written to entries 1-4 to
8201 * preserve entry 0 which is used by the PMF
8202 */
8203 u8 entry = (BP_VN(bp) + 1)*8;
8204
8205 val = (mac_addr[0] << 8) | mac_addr[1];
8206 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8207
8208 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8209 (mac_addr[4] << 8) | mac_addr[5];
8210 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8211
8212 /* Enable the PME and clear the status */
8213 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
8214 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8215 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
8216
8217 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8218
8219 } else
8220 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8221
8222 /* Send the request to the MCP */
8223 if (!BP_NOMCP(bp))
8224 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8225 else {
8226 int path = BP_PATH(bp);
8227
8228 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
8229 path, load_count[path][0], load_count[path][1],
8230 load_count[path][2]);
8231 load_count[path][0]--;
8232 load_count[path][1 + port]--;
8233 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
8234 path, load_count[path][0], load_count[path][1],
8235 load_count[path][2]);
8236 if (load_count[path][0] == 0)
8237 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8238 else if (load_count[path][1 + port] == 0)
8239 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8240 else
8241 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8242 }
8243
8244 return reset_code;
8245 }
8246
8247 /**
8248 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8249 *
8250 * @bp: driver handle
8251 * @keep_link: true iff link should be kept up
8252 */
8253 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8254 {
8255 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8256
8257 /* Report UNLOAD_DONE to MCP */
8258 if (!BP_NOMCP(bp))
8259 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8260 }
8261
8262 static int bnx2x_func_wait_started(struct bnx2x *bp)
8263 {
8264 int tout = 50;
8265 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8266
8267 if (!bp->port.pmf)
8268 return 0;
8269
8270 /*
8271 * (assumption: No Attention from MCP at this stage)
8272 * PMF probably in the middle of TXdisable/enable transaction
8273 * 1. Sync IRQ for default SB
8274 * 2. Sync SP queue - this guarantees us that attention handling started
8275 * 3. Wait until the TXdisable/enable transaction completes
8276 *
8277 * 1+2 guarantee that if the DCBx attention was scheduled it already
8278 * changed the pending bit of the transaction from STARTED-->TX_STOPPED; if
8279 * we already received the completion for the transaction the state is
8280 * TX_STOPPED. State will return to STARTED after completion of the
8281 * TX_STOPPED-->STARTED transaction.
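 *
 * (The polling loop below bounds this wait to roughly 1 second:
 * up to 50 iterations of a 20ms sleep, per the tout value above.)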
8282 */
8283
8284 /* make sure default SB ISR is done */
8285 if (msix)
8286 synchronize_irq(bp->msix_table[0].vector);
8287 else
8288 synchronize_irq(bp->pdev->irq);
8289
8290 flush_workqueue(bnx2x_wq);
8291
8292 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8293 BNX2X_F_STATE_STARTED && tout--)
8294 msleep(20);
8295
8296 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8297 BNX2X_F_STATE_STARTED) {
8298 #ifdef BNX2X_STOP_ON_ERROR
8299 BNX2X_ERR("Wrong function state\n");
8300 return -EBUSY;
8301 #else
8302 /*
8303 * Failed to complete the transaction in a "good way".
8304 * Force both transactions with the CLR bit.
8305 */
8306 struct bnx2x_func_state_params func_params = {NULL};
8307
8308 DP(NETIF_MSG_IFDOWN,
8309 "Hmmm... unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
8310
8311 func_params.f_obj = &bp->func_obj;
8312 __set_bit(RAMROD_DRV_CLR_ONLY,
8313 &func_params.ramrod_flags);
8314
8315 /* STARTED-->TX_STOPPED */
8316 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8317 bnx2x_func_state_change(bp, &func_params);
8318
8319 /* TX_STOPPED-->STARTED */
8320 func_params.cmd = BNX2X_F_CMD_TX_START;
8321 return bnx2x_func_state_change(bp, &func_params);
8322 #endif
8323 }
8324
8325 return 0;
8326 }
8327
8328 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8329 {
8330 int port = BP_PORT(bp);
8331 int i, rc = 0;
8332 u8 cos;
8333 struct bnx2x_mcast_ramrod_params rparam = {NULL};
8334 u32 reset_code;
8335
8336 /* Wait until tx fastpath tasks complete */
8337 for_each_tx_queue(bp, i) {
8338 struct bnx2x_fastpath *fp = &bp->fp[i];
8339
8340 for_each_cos_in_tx_queue(fp, cos)
8341 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8342 #ifdef BNX2X_STOP_ON_ERROR
8343 if (rc)
8344 return;
8345 #endif
8346 }
8347
8348 /* Give HW time to discard old tx messages */
8349 usleep_range(1000, 1000);
8350
8351 /* Clean all ETH MACs */
8352 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8353 false);
8354 if (rc < 0)
8355 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8356
8357 /* Clean up UC list */
8358 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8359 true);
8360 if (rc < 0)
8361 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8362 rc);
8363
8364 /* Disable LLH */
8365 if (!CHIP_IS_E1(bp))
8366 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8367
8368 /* Set "drop all" (stop Rx).
8369 * We need to take a netif_addr_lock() here in order to prevent
8370 * a race between the completion code and this code.
8371 */
8372 netif_addr_lock_bh(bp->dev);
8373 /* Schedule the rx_mode command */
8374 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8375 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8376 else
8377 bnx2x_set_storm_rx_mode(bp);
8378
8379 /* Cleanup multicast configuration */
8380 rparam.mcast_obj = &bp->mcast_obj;
8381 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8382 if (rc < 0)
8383 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8384
8385 netif_addr_unlock_bh(bp->dev);
8386
8387
8388
8389 /*
8390 * Send the UNLOAD_REQUEST to the MCP. This will return if
8391 * this function should perform FUNC, PORT or COMMON HW
8392 * reset.
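 *
 * (The reset_code obtained below selects the teardown depth at the
 * end of this function: COMMON resets the whole chip, PORT the
 * per-port blocks, FUNC only this function.)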
8393 */ 8394 reset_code = bnx2x_send_unload_req(bp, unload_mode); 8395 8396 /* 8397 * (assumption: No Attention from MCP at this stage) 8398 * PMF probably in the middle of TXdisable/enable transaction 8399 */ 8400 rc = bnx2x_func_wait_started(bp); 8401 if (rc) { 8402 BNX2X_ERR("bnx2x_func_wait_started failed\n"); 8403 #ifdef BNX2X_STOP_ON_ERROR 8404 return; 8405 #endif 8406 } 8407 8408 /* Close multi and leading connections 8409 * Completions for ramrods are collected in a synchronous way 8410 */ 8411 for_each_queue(bp, i) 8412 if (bnx2x_stop_queue(bp, i)) 8413 #ifdef BNX2X_STOP_ON_ERROR 8414 return; 8415 #else 8416 goto unload_error; 8417 #endif 8418 /* If SP settings didn't get completed so far - something 8419 * very wrong has happen. 8420 */ 8421 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) 8422 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); 8423 8424 #ifndef BNX2X_STOP_ON_ERROR 8425 unload_error: 8426 #endif 8427 rc = bnx2x_func_stop(bp); 8428 if (rc) { 8429 BNX2X_ERR("Function stop failed!\n"); 8430 #ifdef BNX2X_STOP_ON_ERROR 8431 return; 8432 #endif 8433 } 8434 8435 /* Disable HW interrupts, NAPI */ 8436 bnx2x_netif_stop(bp, 1); 8437 /* Delete all NAPI objects */ 8438 bnx2x_del_all_napi(bp); 8439 8440 /* Release IRQs */ 8441 bnx2x_free_irq(bp); 8442 8443 /* Reset the chip */ 8444 rc = bnx2x_reset_hw(bp, reset_code); 8445 if (rc) 8446 BNX2X_ERR("HW_RESET failed\n"); 8447 8448 8449 /* Report UNLOAD_DONE to MCP */ 8450 bnx2x_send_unload_done(bp, keep_link); 8451 } 8452 8453 void bnx2x_disable_close_the_gate(struct bnx2x *bp) 8454 { 8455 u32 val; 8456 8457 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); 8458 8459 if (CHIP_IS_E1(bp)) { 8460 int port = BP_PORT(bp); 8461 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8462 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8463 8464 val = REG_RD(bp, addr); 8465 val &= ~(0x300); 8466 REG_WR(bp, addr, val); 8467 } else { 8468 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); 8469 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 8470 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 8471 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); 8472 } 8473 } 8474 8475 /* Close gates #2, #3 and #4: */ 8476 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 8477 { 8478 u32 val; 8479 8480 /* Gates #2 and #4a are closed/opened for "not E1" only */ 8481 if (!CHIP_IS_E1(bp)) { 8482 /* #4 */ 8483 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 8484 /* #2 */ 8485 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 8486 } 8487 8488 /* #3 */ 8489 if (CHIP_IS_E1x(bp)) { 8490 /* Prevent interrupts from HC on both ports */ 8491 val = REG_RD(bp, HC_REG_CONFIG_1); 8492 REG_WR(bp, HC_REG_CONFIG_1, 8493 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 8494 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 8495 8496 val = REG_RD(bp, HC_REG_CONFIG_0); 8497 REG_WR(bp, HC_REG_CONFIG_0, 8498 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 8499 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 8500 } else { 8501 /* Prevent incomming interrupts in IGU */ 8502 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 8503 8504 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, 8505 (!close) ? 8506 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 8507 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 8508 } 8509 8510 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", 8511 close ? 
"closing" : "opening"); 8512 mmiowb(); 8513 } 8514 8515 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ 8516 8517 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) 8518 { 8519 /* Do some magic... */ 8520 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 8521 *magic_val = val & SHARED_MF_CLP_MAGIC; 8522 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 8523 } 8524 8525 /** 8526 * bnx2x_clp_reset_done - restore the value of the `magic' bit. 8527 * 8528 * @bp: driver handle 8529 * @magic_val: old value of the `magic' bit. 8530 */ 8531 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 8532 { 8533 /* Restore the `magic' bit value... */ 8534 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 8535 MF_CFG_WR(bp, shared_mf_config.clp_mb, 8536 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 8537 } 8538 8539 /** 8540 * bnx2x_reset_mcp_prep - prepare for MCP reset. 8541 * 8542 * @bp: driver handle 8543 * @magic_val: old value of 'magic' bit. 8544 * 8545 * Takes care of CLP configurations. 8546 */ 8547 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 8548 { 8549 u32 shmem; 8550 u32 validity_offset; 8551 8552 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); 8553 8554 /* Set `magic' bit in order to save MF config */ 8555 if (!CHIP_IS_E1(bp)) 8556 bnx2x_clp_reset_prep(bp, magic_val); 8557 8558 /* Get shmem offset */ 8559 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 8560 validity_offset = offsetof(struct shmem_region, validity_map[0]); 8561 8562 /* Clear validity map flags */ 8563 if (shmem > 0) 8564 REG_WR(bp, shmem + validity_offset, 0); 8565 } 8566 8567 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 8568 #define MCP_ONE_TIMEOUT 100 /* 100 ms */ 8569 8570 /** 8571 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT 8572 * 8573 * @bp: driver handle 8574 */ 8575 static void bnx2x_mcp_wait_one(struct bnx2x *bp) 8576 { 8577 /* special handling for emulation and FPGA, 8578 wait 10 times longer */ 8579 if (CHIP_REV_IS_SLOW(bp)) 8580 msleep(MCP_ONE_TIMEOUT*10); 8581 else 8582 msleep(MCP_ONE_TIMEOUT); 8583 } 8584 8585 /* 8586 * initializes bp->common.shmem_base and waits for validity signature to appear 8587 */ 8588 static int bnx2x_init_shmem(struct bnx2x *bp) 8589 { 8590 int cnt = 0; 8591 u32 val = 0; 8592 8593 do { 8594 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 8595 if (bp->common.shmem_base) { 8596 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 8597 if (val & SHR_MEM_VALIDITY_MB) 8598 return 0; 8599 } 8600 8601 bnx2x_mcp_wait_one(bp); 8602 8603 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 8604 8605 BNX2X_ERR("BAD MCP validity signature\n"); 8606 8607 return -ENODEV; 8608 } 8609 8610 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 8611 { 8612 int rc = bnx2x_init_shmem(bp); 8613 8614 /* Restore the `magic' bit value */ 8615 if (!CHIP_IS_E1(bp)) 8616 bnx2x_clp_reset_done(bp, magic_val); 8617 8618 return rc; 8619 } 8620 8621 static void bnx2x_pxp_prep(struct bnx2x *bp) 8622 { 8623 if (!CHIP_IS_E1(bp)) { 8624 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 8625 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 8626 mmiowb(); 8627 } 8628 } 8629 8630 /* 8631 * Reset the whole chip except for: 8632 * - PCIE core 8633 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 8634 * one reset bit) 8635 * - IGU 8636 * - MISC (including AEU) 8637 * - GRC 8638 * - RBCN, RBCP 8639 */ 8640 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 8641 { 8642 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 
8643 u32 global_bits2, stay_reset2;
8644
8645 /*
8646 * Bits that have to be set in reset_mask2 if we want to reset 'global'
8647 * (per chip) blocks.
8648 */
8649 global_bits2 =
8650 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
8651 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
8652
8653 /* Don't reset the following blocks */
8654 not_reset_mask1 =
8655 MISC_REGISTERS_RESET_REG_1_RST_HC |
8656 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8657 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8658
8659 not_reset_mask2 =
8660 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
8661 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8662 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8663 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8664 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8665 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8666 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8667 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
8668 MISC_REGISTERS_RESET_REG_2_RST_ATC |
8669 MISC_REGISTERS_RESET_REG_2_PGLC;
8670
8671 /*
8672 * Keep the following blocks in reset:
8673 * - all xxMACs are handled by the bnx2x_link code.
8674 */
8675 stay_reset2 =
8676 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8677 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8678 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
8679 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
8680 MISC_REGISTERS_RESET_REG_2_UMAC0 |
8681 MISC_REGISTERS_RESET_REG_2_UMAC1 |
8682 MISC_REGISTERS_RESET_REG_2_XMAC |
8683 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
8684
8685 /* Full reset masks according to the chip */
8686 reset_mask1 = 0xffffffff;
8687
8688 if (CHIP_IS_E1(bp))
8689 reset_mask2 = 0xffff;
8690 else if (CHIP_IS_E1H(bp))
8691 reset_mask2 = 0x1ffff;
8692 else if (CHIP_IS_E2(bp))
8693 reset_mask2 = 0xfffff;
8694 else /* CHIP_IS_E3 */
8695 reset_mask2 = 0x3ffffff;
8696
8697 /* Don't reset global blocks unless we need to */
8698 if (!global)
8699 reset_mask2 &= ~global_bits2;
8700
8701 /*
8702 * In case of attention in the QM, we need to reset PXP
8703 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
8704 * because otherwise QM reset would release 'close the gates' shortly
8705 * before resetting the PXP, then the PSWRQ would send a write
8706 * request to PGLUE. Then when PXP is reset, PGLUE would try to
8707 * read the payload data from PSWWR, but PSWWR would not
8708 * respond. The write queue in PGLUE would get stuck, and DMAE
8709 * commands would not return. Therefore it's important to reset the
8710 * second reset register (containing the
8711 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
8712 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
8713 * bit).
8714 */
8715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8716 reset_mask2 & (~not_reset_mask2));
8717
8718 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8719 reset_mask1 & (~not_reset_mask1));
8720
8721 barrier();
8722 mmiowb();
8723
8724 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
8725 reset_mask2 & (~stay_reset2));
8726
8727 barrier();
8728 mmiowb();
8729
8730 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8731 mmiowb();
8732 }
8733
8734 /**
8735 * bnx2x_er_poll_igu_vq - poll for the pending writes bit
8736 *
8737 * @bp: driver handle
8738 *
8739 * The bit should get cleared in no more than 1s.
8740 * Returns 0 if the pending writes bit gets cleared,
8741 * -EBUSY otherwise.
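 *
 * (The implementation polls IGU_REG_PENDING_BITS_STATUS up to 1000
 * times with a ~1ms sleep between reads, i.e. the 1s budget above.)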
8742 */ 8743 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) 8744 { 8745 u32 cnt = 1000; 8746 u32 pend_bits = 0; 8747 8748 do { 8749 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); 8750 8751 if (pend_bits == 0) 8752 break; 8753 8754 usleep_range(1000, 1000); 8755 } while (cnt-- > 0); 8756 8757 if (cnt <= 0) { 8758 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", 8759 pend_bits); 8760 return -EBUSY; 8761 } 8762 8763 return 0; 8764 } 8765 8766 static int bnx2x_process_kill(struct bnx2x *bp, bool global) 8767 { 8768 int cnt = 1000; 8769 u32 val = 0; 8770 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 8771 8772 8773 /* Empty the Tetris buffer, wait for 1s */ 8774 do { 8775 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); 8776 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); 8777 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); 8778 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); 8779 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); 8780 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 8781 ((port_is_idle_0 & 0x1) == 0x1) && 8782 ((port_is_idle_1 & 0x1) == 0x1) && 8783 (pgl_exp_rom2 == 0xffffffff)) 8784 break; 8785 usleep_range(1000, 1000); 8786 } while (cnt-- > 0); 8787 8788 if (cnt <= 0) { 8789 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); 8790 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 8791 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 8792 pgl_exp_rom2); 8793 return -EAGAIN; 8794 } 8795 8796 barrier(); 8797 8798 /* Close gates #2, #3 and #4 */ 8799 bnx2x_set_234_gates(bp, true); 8800 8801 /* Poll for IGU VQs for 57712 and newer chips */ 8802 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) 8803 return -EAGAIN; 8804 8805 8806 /* TBD: Indicate that "process kill" is in progress to MCP */ 8807 8808 /* Clear "unprepared" bit */ 8809 REG_WR(bp, MISC_REG_UNPREPARED, 0); 8810 barrier(); 8811 8812 /* Make sure all is written to the chip before the reset */ 8813 mmiowb(); 8814 8815 /* Wait for 1ms to empty GLUE and PCI-E core queues, 8816 * PSWHST, GRC and PSWRD Tetris buffer. 8817 */ 8818 usleep_range(1000, 1000); 8819 8820 /* Prepare to chip reset: */ 8821 /* MCP */ 8822 if (global) 8823 bnx2x_reset_mcp_prep(bp, &val); 8824 8825 /* PXP */ 8826 bnx2x_pxp_prep(bp); 8827 barrier(); 8828 8829 /* reset the chip */ 8830 bnx2x_process_kill_chip_reset(bp, global); 8831 barrier(); 8832 8833 /* Recover after reset: */ 8834 /* MCP */ 8835 if (global && bnx2x_reset_mcp_comp(bp, val)) 8836 return -EAGAIN; 8837 8838 /* TBD: Add resetting the NO_MCP mode DB here */ 8839 8840 /* PXP */ 8841 bnx2x_pxp_prep(bp); 8842 8843 /* Open the gates #2, #3 and #4 */ 8844 bnx2x_set_234_gates(bp, false); 8845 8846 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a 8847 * reset state, re-enable attentions. 
*/
8848
8849 return 0;
8850 }
8851
8852 int bnx2x_leader_reset(struct bnx2x *bp)
8853 {
8854 int rc = 0;
8855 bool global = bnx2x_reset_is_global(bp);
8856 u32 load_code;
8857
8858 /* if not going to reset MCP - load "fake" driver to reset HW while
8859 * driver is owner of the HW
8860 */
8861 if (!global && !BP_NOMCP(bp)) {
8862 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
8863 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
8864 if (!load_code) {
8865 BNX2X_ERR("MCP response failure, aborting\n");
8866 rc = -EAGAIN;
8867 goto exit_leader_reset;
8868 }
8869 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
8870 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
8871 BNX2X_ERR("MCP unexpected resp, aborting\n");
8872 rc = -EAGAIN;
8873 goto exit_leader_reset2;
8874 }
8875 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
8876 if (!load_code) {
8877 BNX2X_ERR("MCP response failure, aborting\n");
8878 rc = -EAGAIN;
8879 goto exit_leader_reset2;
8880 }
8881 }
8882
8883 /* Try to recover after the failure */
8884 if (bnx2x_process_kill(bp, global)) {
8885 BNX2X_ERR("Something bad has happened on engine %d! Aii!\n",
8886 BP_PATH(bp));
8887 rc = -EAGAIN;
8888 goto exit_leader_reset2;
8889 }
8890
8891 /*
8892 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
8893 * state.
8894 */
8895 bnx2x_set_reset_done(bp);
8896 if (global)
8897 bnx2x_clear_reset_global(bp);
8898
8899 exit_leader_reset2:
8900 /* unload "fake driver" if it was loaded */
8901 if (!global && !BP_NOMCP(bp)) {
8902 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
8903 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
8904 }
8905 exit_leader_reset:
8906 bp->is_leader = 0;
8907 bnx2x_release_leader_lock(bp);
8908 smp_mb();
8909 return rc;
8910 }
8911
8912 static void bnx2x_recovery_failed(struct bnx2x *bp)
8913 {
8914 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
8915
8916 /* Disconnect this device */
8917 netif_device_detach(bp->dev);
8918
8919 /*
8920 * Block ifup for all functions on this engine until "process kill"
8921 * or power cycle.
8922 */
8923 bnx2x_set_reset_in_progress(bp);
8924
8925 /* Shut down the power */
8926 bnx2x_set_power_state(bp, PCI_D3hot);
8927
8928 bp->recovery_state = BNX2X_RECOVERY_FAILED;
8929
8930 smp_mb();
8931 }
8932
8933 /*
8934 * Assumption: runs under rtnl lock. This together with the fact
8935 * that it's called only from bnx2x_sp_rtnl() ensures that it
8936 * will never be called when netif_running(bp->dev) is false.
8937 */
8938 static void bnx2x_parity_recover(struct bnx2x *bp)
8939 {
8940 bool global = false;
8941 u32 error_recovered, error_unrecovered;
8942 bool is_parity;
8943
8944 DP(NETIF_MSG_HW, "Handling parity\n");
8945 while (1) {
8946 switch (bp->recovery_state) {
8947 case BNX2X_RECOVERY_INIT:
8948 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8949 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
8950 WARN_ON(!is_parity);
8951
8952 /* Try to get a LEADER_LOCK HW lock */
8953 if (bnx2x_trylock_leader_lock(bp)) {
8954 bnx2x_set_reset_in_progress(bp);
8955 /*
8956 * Check if there is a global attention and,
8957 * if there was one, set the global
8958 * reset bit.
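 *
 * (Setting the global bit makes the non-leader functions in the
 * WAIT state below hold off reloading until the leader has also
 * reset the common blocks and cleared the bit.)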
8959 */ 8960 8961 if (global) 8962 bnx2x_set_reset_global(bp); 8963 8964 bp->is_leader = 1; 8965 } 8966 8967 /* Stop the driver */ 8968 /* If interface has been removed - break */ 8969 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) 8970 return; 8971 8972 bp->recovery_state = BNX2X_RECOVERY_WAIT; 8973 8974 /* Ensure "is_leader", MCP command sequence and 8975 * "recovery_state" update values are seen on other 8976 * CPUs. 8977 */ 8978 smp_mb(); 8979 break; 8980 8981 case BNX2X_RECOVERY_WAIT: 8982 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); 8983 if (bp->is_leader) { 8984 int other_engine = BP_PATH(bp) ? 0 : 1; 8985 bool other_load_status = 8986 bnx2x_get_load_status(bp, other_engine); 8987 bool load_status = 8988 bnx2x_get_load_status(bp, BP_PATH(bp)); 8989 global = bnx2x_reset_is_global(bp); 8990 8991 /* 8992 * In case of a parity in a global block, let 8993 * the first leader that performs a 8994 * leader_reset() reset the global blocks in 8995 * order to clear global attentions. Otherwise 8996 * the the gates will remain closed for that 8997 * engine. 8998 */ 8999 if (load_status || 9000 (global && other_load_status)) { 9001 /* Wait until all other functions get 9002 * down. 9003 */ 9004 schedule_delayed_work(&bp->sp_rtnl_task, 9005 HZ/10); 9006 return; 9007 } else { 9008 /* If all other functions got down - 9009 * try to bring the chip back to 9010 * normal. In any case it's an exit 9011 * point for a leader. 9012 */ 9013 if (bnx2x_leader_reset(bp)) { 9014 bnx2x_recovery_failed(bp); 9015 return; 9016 } 9017 9018 /* If we are here, means that the 9019 * leader has succeeded and doesn't 9020 * want to be a leader any more. Try 9021 * to continue as a none-leader. 9022 */ 9023 break; 9024 } 9025 } else { /* non-leader */ 9026 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { 9027 /* Try to get a LEADER_LOCK HW lock as 9028 * long as a former leader may have 9029 * been unloaded by the user or 9030 * released a leadership by another 9031 * reason. 9032 */ 9033 if (bnx2x_trylock_leader_lock(bp)) { 9034 /* I'm a leader now! Restart a 9035 * switch case. 9036 */ 9037 bp->is_leader = 1; 9038 break; 9039 } 9040 9041 schedule_delayed_work(&bp->sp_rtnl_task, 9042 HZ/10); 9043 return; 9044 9045 } else { 9046 /* 9047 * If there was a global attention, wait 9048 * for it to be cleared. 9049 */ 9050 if (bnx2x_reset_is_global(bp)) { 9051 schedule_delayed_work( 9052 &bp->sp_rtnl_task, 9053 HZ/10); 9054 return; 9055 } 9056 9057 error_recovered = 9058 bp->eth_stats.recoverable_error; 9059 error_unrecovered = 9060 bp->eth_stats.unrecoverable_error; 9061 bp->recovery_state = 9062 BNX2X_RECOVERY_NIC_LOADING; 9063 if (bnx2x_nic_load(bp, LOAD_NORMAL)) { 9064 error_unrecovered++; 9065 netdev_err(bp->dev, 9066 "Recovery failed. Power cycle needed\n"); 9067 /* Disconnect this device */ 9068 netif_device_detach(bp->dev); 9069 /* Shut down the power */ 9070 bnx2x_set_power_state( 9071 bp, PCI_D3hot); 9072 smp_mb(); 9073 } else { 9074 bp->recovery_state = 9075 BNX2X_RECOVERY_DONE; 9076 error_recovered++; 9077 smp_mb(); 9078 } 9079 bp->eth_stats.recoverable_error = 9080 error_recovered; 9081 bp->eth_stats.unrecoverable_error = 9082 error_unrecovered; 9083 9084 return; 9085 } 9086 } 9087 default: 9088 return; 9089 } 9090 } 9091 } 9092 9093 static int bnx2x_close(struct net_device *dev); 9094 9095 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is 9096 * scheduled on a general queue in order to prevent a dead lock. 
9097 */ 9098 static void bnx2x_sp_rtnl_task(struct work_struct *work) 9099 { 9100 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); 9101 9102 rtnl_lock(); 9103 9104 if (!netif_running(bp->dev)) 9105 goto sp_rtnl_exit; 9106 9107 /* if stop on error is defined no recovery flows should be executed */ 9108 #ifdef BNX2X_STOP_ON_ERROR 9109 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 9110 "you will need to reboot when done\n"); 9111 goto sp_rtnl_not_reset; 9112 #endif 9113 9114 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { 9115 /* 9116 * Clear all pending SP commands as we are going to reset the 9117 * function anyway. 9118 */ 9119 bp->sp_rtnl_state = 0; 9120 smp_mb(); 9121 9122 bnx2x_parity_recover(bp); 9123 9124 goto sp_rtnl_exit; 9125 } 9126 9127 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { 9128 /* 9129 * Clear all pending SP commands as we are going to reset the 9130 * function anyway. 9131 */ 9132 bp->sp_rtnl_state = 0; 9133 smp_mb(); 9134 9135 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 9136 bnx2x_nic_load(bp, LOAD_NORMAL); 9137 9138 goto sp_rtnl_exit; 9139 } 9140 #ifdef BNX2X_STOP_ON_ERROR 9141 sp_rtnl_not_reset: 9142 #endif 9143 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 9144 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 9145 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) 9146 bnx2x_after_function_update(bp); 9147 /* 9148 * in case of fan failure we need to reset id if the "stop on error" 9149 * debug flag is set, since we trying to prevent permanent overheating 9150 * damage 9151 */ 9152 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { 9153 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); 9154 netif_device_detach(bp->dev); 9155 bnx2x_close(bp->dev); 9156 } 9157 9158 sp_rtnl_exit: 9159 rtnl_unlock(); 9160 } 9161 9162 /* end of nic load/unload */ 9163 9164 static void bnx2x_period_task(struct work_struct *work) 9165 { 9166 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); 9167 9168 if (!netif_running(bp->dev)) 9169 goto period_task_exit; 9170 9171 if (CHIP_REV_IS_SLOW(bp)) { 9172 BNX2X_ERR("period task called on emulation, ignoring\n"); 9173 goto period_task_exit; 9174 } 9175 9176 bnx2x_acquire_phy_lock(bp); 9177 /* 9178 * The barrier is needed to ensure the ordering between the writing to 9179 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 9180 * the reading here. 
9181 */ 9182 smp_mb(); 9183 if (bp->port.pmf) { 9184 bnx2x_period_func(&bp->link_params, &bp->link_vars); 9185 9186 /* Re-queue task in 1 sec */ 9187 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); 9188 } 9189 9190 bnx2x_release_phy_lock(bp); 9191 period_task_exit: 9192 return; 9193 } 9194 9195 /* 9196 * Init service functions 9197 */ 9198 9199 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 9200 { 9201 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 9202 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 9203 return base + (BP_ABS_FUNC(bp)) * stride; 9204 } 9205 9206 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) 9207 { 9208 u32 reg = bnx2x_get_pretend_reg(bp); 9209 9210 /* Flush all outstanding writes */ 9211 mmiowb(); 9212 9213 /* Pretend to be function 0 */ 9214 REG_WR(bp, reg, 0); 9215 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ 9216 9217 /* From now we are in the "like-E1" mode */ 9218 bnx2x_int_disable(bp); 9219 9220 /* Flush all outstanding writes */ 9221 mmiowb(); 9222 9223 /* Restore the original function */ 9224 REG_WR(bp, reg, BP_ABS_FUNC(bp)); 9225 REG_RD(bp, reg); 9226 } 9227 9228 static inline void bnx2x_undi_int_disable(struct bnx2x *bp) 9229 { 9230 if (CHIP_IS_E1(bp)) 9231 bnx2x_int_disable(bp); 9232 else 9233 bnx2x_undi_int_disable_e1h(bp); 9234 } 9235 9236 static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp) 9237 { 9238 u32 val, base_addr, offset, mask, reset_reg; 9239 bool mac_stopped = false; 9240 u8 port = BP_PORT(bp); 9241 9242 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 9243 9244 if (!CHIP_IS_E3(bp)) { 9245 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 9246 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 9247 if ((mask & reset_reg) && val) { 9248 u32 wb_data[2]; 9249 BNX2X_DEV_INFO("Disable bmac Rx\n"); 9250 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM 9251 : NIG_REG_INGRESS_BMAC0_MEM; 9252 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL 9253 : BIGMAC_REGISTER_BMAC_CONTROL; 9254 9255 /* 9256 * use rd/wr since we cannot use dmae. This is safe 9257 * since MCP won't access the bus due to the request 9258 * to unload, and no function on the path can be 9259 * loaded at this time. 9260 */ 9261 wb_data[0] = REG_RD(bp, base_addr + offset); 9262 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); 9263 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 9264 REG_WR(bp, base_addr + offset, wb_data[0]); 9265 REG_WR(bp, base_addr + offset + 0x4, wb_data[1]); 9266 9267 } 9268 BNX2X_DEV_INFO("Disable emac Rx\n"); 9269 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0); 9270 9271 mac_stopped = true; 9272 } else { 9273 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 9274 BNX2X_DEV_INFO("Disable xmac Rx\n"); 9275 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 9276 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); 9277 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 9278 val & ~(1 << 1)); 9279 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 9280 val | (1 << 1)); 9281 REG_WR(bp, base_addr + XMAC_REG_CTRL, 0); 9282 mac_stopped = true; 9283 } 9284 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 9285 if (mask & reset_reg) { 9286 BNX2X_DEV_INFO("Disable umac Rx\n"); 9287 base_addr = BP_PORT(bp) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; 9288 REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0); 9289 mac_stopped = true; 9290 } 9291 } 9292 9293 if (mac_stopped) 9294 msleep(20); 9295 9296 } 9297 9298 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 9299 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 9300 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 9301 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 9302 9303 static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, 9304 u8 inc) 9305 { 9306 u16 rcq, bd; 9307 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9308 9309 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 9310 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 9311 9312 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 9313 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 9314 9315 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 9316 port, bd, rcq); 9317 } 9318 9319 static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) 9320 { 9321 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 9322 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9323 if (!rc) { 9324 BNX2X_ERR("MCP response failure, aborting\n"); 9325 return -EBUSY; 9326 } 9327 9328 return 0; 9329 } 9330 9331 static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp) 9332 { 9333 struct bnx2x_prev_path_list *tmp_list; 9334 int rc = false; 9335 9336 if (down_trylock(&bnx2x_prev_sem)) 9337 return false; 9338 9339 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) { 9340 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 9341 bp->pdev->bus->number == tmp_list->bus && 9342 BP_PATH(bp) == tmp_list->path) { 9343 rc = true; 9344 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 9345 BP_PATH(bp)); 9346 break; 9347 } 9348 } 9349 9350 up(&bnx2x_prev_sem); 9351 9352 return rc; 9353 } 9354 9355 static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) 9356 { 9357 struct bnx2x_prev_path_list *tmp_list; 9358 int rc; 9359 9360 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 9361 if (!tmp_list) { 9362 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 9363 return -ENOMEM; 9364 } 9365 9366 tmp_list->bus = bp->pdev->bus->number; 9367 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 9368 tmp_list->path = BP_PATH(bp); 9369 9370 rc = down_interruptible(&bnx2x_prev_sem); 9371 if (rc) { 9372 BNX2X_ERR("Received %d when tried to take lock\n", rc); 9373 kfree(tmp_list); 9374 } else { 9375 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n", 9376 BP_PATH(bp)); 9377 list_add(&tmp_list->list, &bnx2x_prev_list); 9378 up(&bnx2x_prev_sem); 9379 } 9380 9381 return rc; 9382 } 9383 9384 static int __devinit bnx2x_do_flr(struct bnx2x *bp) 9385 { 9386 int i; 9387 u16 status; 9388 struct pci_dev *dev = bp->pdev; 9389 9390 9391 if (CHIP_IS_E1x(bp)) { 9392 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); 9393 return -EINVAL; 9394 } 9395 9396 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 9397 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 9398 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", 9399 bp->common.bc_ver); 9400 return -EINVAL; 9401 } 9402 9403 /* Wait for Transaction Pending bit clean */ 9404 for (i = 0; i < 4; i++) { 9405 if (i) 9406 msleep((1 << (i - 1)) * 100); 9407 9408 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 9409 if (!(status & PCI_EXP_DEVSTA_TRPND)) 9410 goto clear; 9411 } 9412 9413 dev_err(&dev->dev, 9414 "transaction is not cleared; proceeding with reset 
anyway\n"); 9415 9416 clear: 9417 9418 BNX2X_DEV_INFO("Initiating FLR\n"); 9419 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 9420 9421 return 0; 9422 } 9423 9424 static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) 9425 { 9426 int rc; 9427 9428 BNX2X_DEV_INFO("Uncommon unload Flow\n"); 9429 9430 /* Test if previous unload process was already finished for this path */ 9431 if (bnx2x_prev_is_path_marked(bp)) 9432 return bnx2x_prev_mcp_done(bp); 9433 9434 /* If function has FLR capabilities, and existing FW version matches 9435 * the one required, then FLR will be sufficient to clean any residue 9436 * left by previous driver 9437 */ 9438 rc = bnx2x_test_firmware_version(bp, false); 9439 9440 if (!rc) { 9441 /* fw version is good */ 9442 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); 9443 rc = bnx2x_do_flr(bp); 9444 } 9445 9446 if (!rc) { 9447 /* FLR was performed */ 9448 BNX2X_DEV_INFO("FLR successful\n"); 9449 return 0; 9450 } 9451 9452 BNX2X_DEV_INFO("Could not FLR\n"); 9453 9454 /* Close the MCP request, return failure*/ 9455 rc = bnx2x_prev_mcp_done(bp); 9456 if (!rc) 9457 rc = BNX2X_PREV_WAIT_NEEDED; 9458 9459 return rc; 9460 } 9461 9462 static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) 9463 { 9464 u32 reset_reg, tmp_reg = 0, rc; 9465 /* It is possible a previous function received 'common' answer, 9466 * but hasn't loaded yet, therefore creating a scenario of 9467 * multiple functions receiving 'common' on the same path. 9468 */ 9469 BNX2X_DEV_INFO("Common unload Flow\n"); 9470 9471 if (bnx2x_prev_is_path_marked(bp)) 9472 return bnx2x_prev_mcp_done(bp); 9473 9474 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 9475 9476 /* Reset should be performed after BRB is emptied */ 9477 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 9478 u32 timer_count = 1000; 9479 bool prev_undi = false; 9480 9481 /* Close the MAC Rx to prevent BRB from filling up */ 9482 bnx2x_prev_unload_close_mac(bp); 9483 9484 /* Check if the UNDI driver was previously loaded 9485 * UNDI driver initializes CID offset for normal bell to 0x7 9486 */ 9487 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 9488 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 9489 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 9490 if (tmp_reg == 0x7) { 9491 BNX2X_DEV_INFO("UNDI previously loaded\n"); 9492 prev_undi = true; 9493 /* clear the UNDI indication */ 9494 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 9495 } 9496 } 9497 /* wait until BRB is empty */ 9498 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 9499 while (timer_count) { 9500 u32 prev_brb = tmp_reg; 9501 9502 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 9503 if (!tmp_reg) 9504 break; 9505 9506 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); 9507 9508 /* reset timer as long as BRB actually gets emptied */ 9509 if (prev_brb > tmp_reg) 9510 timer_count = 1000; 9511 else 9512 timer_count--; 9513 9514 /* If UNDI resides in memory, manually increment it */ 9515 if (prev_undi) 9516 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); 9517 9518 udelay(10); 9519 } 9520 9521 if (!timer_count) 9522 BNX2X_ERR("Failed to empty BRB, hope for the best\n"); 9523 9524 } 9525 9526 /* No packets are in the pipeline, path is ready for reset */ 9527 bnx2x_reset_common(bp); 9528 9529 rc = bnx2x_prev_mark_path(bp); 9530 if (rc) { 9531 bnx2x_prev_mcp_done(bp); 9532 return rc; 9533 } 9534 9535 return bnx2x_prev_mcp_done(bp); 9536 } 9537 9538 /* previous driver DMAE transaction may have occurred when pre-boot stage ended 9539 * and boot began, or 
when a kdump kernel was loaded. Either case invalidates
9540  * the addresses used by the transaction, which sets the was-error bit in
9541  * the PCI block and causes all HW-to-host PCIe transactions to time out.
9542  * If this happened, we want to clear the interrupt which detected it from
9543  * the pglueb and also clear the was-error bit itself
9544  */
9545 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9546 {
9547 	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
9548 	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
9549 		BNX2X_ERR("was-error bit was found to be set in pglueb upon startup - clearing\n");
9550 		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
9551 	}
9552 }
9553
9554 static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
9555 {
9556 	int time_counter = 10;
9557 	u32 rc, fw, hw_lock_reg, hw_lock_val;
9558 	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9559
9560 	/* clear hw from errors which may have resulted from an interrupted
9561 	 * dmae transaction.
9562 	 */
9563 	bnx2x_prev_interrupted_dmae(bp);
9564
9565 	/* Release previously held locks */
9566 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9567 		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9568 		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
9569
9570 	hw_lock_val = REG_RD(bp, hw_lock_reg);
9571 	if (hw_lock_val) {
9572 		if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
9573 			BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
9574 			REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9575 			       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
9576 		}
9577
9578 		BNX2X_DEV_INFO("Release Previously held hw lock\n");
9579 		REG_WR(bp, hw_lock_reg, 0xffffffff);
9580 	} else
9581 		BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
9582
9583 	if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
9584 		BNX2X_DEV_INFO("Release previously held alr\n");
9585 		REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
9586 	}
9587
9588
9589 	do {
9590 		/* Lock MCP using an unload request */
9591 		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9592 		if (!fw) {
9593 			BNX2X_ERR("MCP response failure, aborting\n");
9594 			rc = -EBUSY;
9595 			break;
9596 		}
9597
9598 		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9599 			rc = bnx2x_prev_unload_common(bp);
9600 			break;
9601 		}
9602
9603 		/* a non-common reply from the MCP might require looping */
9604 		rc = bnx2x_prev_unload_uncommon(bp);
9605 		if (rc != BNX2X_PREV_WAIT_NEEDED)
9606 			break;
9607
9608 		msleep(20);
9609 	} while (--time_counter);
9610
9611 	if (!time_counter || rc) {
9612 		BNX2X_ERR("Failed unloading previous driver, aborting\n");
9613 		rc = -EBUSY;
9614 	}
9615
9616 	BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
9617
9618 	return rc;
9619 }
9620
9621 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9622 {
9623 	u32 val, val2, val3, val4, id, boot_mode;
9624 	u16 pmc;
9625
9626 	/* Get the chip revision id and number.
*/ 9627 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 9628 val = REG_RD(bp, MISC_REG_CHIP_NUM); 9629 id = ((val & 0xffff) << 16); 9630 val = REG_RD(bp, MISC_REG_CHIP_REV); 9631 id |= ((val & 0xf) << 12); 9632 val = REG_RD(bp, MISC_REG_CHIP_METAL); 9633 id |= ((val & 0xff) << 4); 9634 val = REG_RD(bp, MISC_REG_BOND_ID); 9635 id |= (val & 0xf); 9636 bp->common.chip_id = id; 9637 9638 /* force 57811 according to MISC register */ 9639 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 9640 if (CHIP_IS_57810(bp)) 9641 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 9642 (bp->common.chip_id & 0x0000FFFF); 9643 else if (CHIP_IS_57810_MF(bp)) 9644 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 9645 (bp->common.chip_id & 0x0000FFFF); 9646 bp->common.chip_id |= 0x1; 9647 } 9648 9649 /* Set doorbell size */ 9650 bp->db_size = (1 << BNX2X_DB_SHIFT); 9651 9652 if (!CHIP_IS_E1x(bp)) { 9653 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 9654 if ((val & 1) == 0) 9655 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 9656 else 9657 val = (val >> 1) & 1; 9658 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 9659 "2_PORT_MODE"); 9660 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : 9661 CHIP_2_PORT_MODE; 9662 9663 if (CHIP_MODE_IS_4_PORT(bp)) 9664 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 9665 else 9666 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 9667 } else { 9668 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 9669 bp->pfid = bp->pf_num; /* 0..7 */ 9670 } 9671 9672 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 9673 9674 bp->link_params.chip_id = bp->common.chip_id; 9675 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 9676 9677 val = (REG_RD(bp, 0x2874) & 0x55); 9678 if ((bp->common.chip_id & 0x1) || 9679 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 9680 bp->flags |= ONE_PORT_FLAG; 9681 BNX2X_DEV_INFO("single port device\n"); 9682 } 9683 9684 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 9685 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 9686 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 9687 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 9688 bp->common.flash_size, bp->common.flash_size); 9689 9690 bnx2x_init_shmem(bp); 9691 9692 9693 9694 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
9695 MISC_REG_GENERIC_CR_1 : 9696 MISC_REG_GENERIC_CR_0)); 9697 9698 bp->link_params.shmem_base = bp->common.shmem_base; 9699 bp->link_params.shmem2_base = bp->common.shmem2_base; 9700 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 9701 bp->common.shmem_base, bp->common.shmem2_base); 9702 9703 if (!bp->common.shmem_base) { 9704 BNX2X_DEV_INFO("MCP not active\n"); 9705 bp->flags |= NO_MCP_FLAG; 9706 return; 9707 } 9708 9709 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9710 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9711 9712 bp->link_params.hw_led_mode = ((bp->common.hw_config & 9713 SHARED_HW_CFG_LED_MODE_MASK) >> 9714 SHARED_HW_CFG_LED_MODE_SHIFT); 9715 9716 bp->link_params.feature_config_flags = 0; 9717 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 9718 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 9719 bp->link_params.feature_config_flags |= 9720 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 9721 else 9722 bp->link_params.feature_config_flags &= 9723 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 9724 9725 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 9726 bp->common.bc_ver = val; 9727 BNX2X_DEV_INFO("bc_ver %X\n", val); 9728 if (val < BNX2X_BC_VER) { 9729 /* for now only warn 9730 * later we might need to enforce this */ 9731 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 9732 BNX2X_BC_VER, val); 9733 } 9734 bp->link_params.feature_config_flags |= 9735 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 9736 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 9737 9738 bp->link_params.feature_config_flags |= 9739 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 9740 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 9741 bp->link_params.feature_config_flags |= 9742 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 9743 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 9744 bp->link_params.feature_config_flags |= 9745 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9746 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9747 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 9748 BC_SUPPORTS_PFC_STATS : 0; 9749 9750 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? 9751 BC_SUPPORTS_FCOE_FEATURES : 0; 9752 9753 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 9754 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 9755 boot_mode = SHMEM_RD(bp, 9756 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 9757 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 9758 switch (boot_mode) { 9759 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 9760 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 9761 break; 9762 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 9763 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 9764 break; 9765 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 9766 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 9767 break; 9768 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 9769 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 9770 break; 9771 } 9772 9773 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 9774 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 9775 9776 BNX2X_DEV_INFO("%sWoL capable\n", 9777 (bp->flags & NO_WOL_FLAG) ? 
"not " : ""); 9778 9779 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 9780 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 9781 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9782 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9783 9784 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", 9785 val, val2, val3, val4); 9786 } 9787 9788 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 9789 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 9790 9791 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 9792 { 9793 int pfid = BP_FUNC(bp); 9794 int igu_sb_id; 9795 u32 val; 9796 u8 fid, igu_sb_cnt = 0; 9797 9798 bp->igu_base_sb = 0xff; 9799 if (CHIP_INT_MODE_IS_BC(bp)) { 9800 int vn = BP_VN(bp); 9801 igu_sb_cnt = bp->igu_sb_cnt; 9802 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 9803 FP_SB_MAX_E1x; 9804 9805 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 9806 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 9807 9808 return; 9809 } 9810 9811 /* IGU in normal mode - read CAM */ 9812 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 9813 igu_sb_id++) { 9814 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 9815 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 9816 continue; 9817 fid = IGU_FID(val); 9818 if ((fid & IGU_FID_ENCODE_IS_PF)) { 9819 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) 9820 continue; 9821 if (IGU_VEC(val) == 0) 9822 /* default status block */ 9823 bp->igu_dsb_id = igu_sb_id; 9824 else { 9825 if (bp->igu_base_sb == 0xff) 9826 bp->igu_base_sb = igu_sb_id; 9827 igu_sb_cnt++; 9828 } 9829 } 9830 } 9831 9832 #ifdef CONFIG_PCI_MSI 9833 /* Due to new PF resource allocation by MFW T7.4 and above, it's 9834 * optional that number of CAM entries will not be equal to the value 9835 * advertised in PCI. 9836 * Driver should use the minimal value of both as the actual status 9837 * block count 9838 */ 9839 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); 9840 #endif 9841 9842 if (igu_sb_cnt == 0) 9843 BNX2X_ERR("CAM configuration error\n"); 9844 } 9845 9846 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9847 u32 switch_cfg) 9848 { 9849 int cfg_size = 0, idx, port = BP_PORT(bp); 9850 9851 /* Aggregation of supported attributes of all external phys */ 9852 bp->port.supported[0] = 0; 9853 bp->port.supported[1] = 0; 9854 switch (bp->link_params.num_phys) { 9855 case 1: 9856 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 9857 cfg_size = 1; 9858 break; 9859 case 2: 9860 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 9861 cfg_size = 1; 9862 break; 9863 case 3: 9864 if (bp->link_params.multi_phy_config & 9865 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 9866 bp->port.supported[1] = 9867 bp->link_params.phy[EXT_PHY1].supported; 9868 bp->port.supported[0] = 9869 bp->link_params.phy[EXT_PHY2].supported; 9870 } else { 9871 bp->port.supported[0] = 9872 bp->link_params.phy[EXT_PHY1].supported; 9873 bp->port.supported[1] = 9874 bp->link_params.phy[EXT_PHY2].supported; 9875 } 9876 cfg_size = 2; 9877 break; 9878 } 9879 9880 if (!(bp->port.supported[0] || bp->port.supported[1])) { 9881 BNX2X_ERR("NVRAM config error. BAD phy config. 
PHY1 config 0x%x, PHY2 config 0x%x\n", 9882 SHMEM_RD(bp, 9883 dev_info.port_hw_config[port].external_phy_config), 9884 SHMEM_RD(bp, 9885 dev_info.port_hw_config[port].external_phy_config2)); 9886 return; 9887 } 9888 9889 if (CHIP_IS_E3(bp)) 9890 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 9891 else { 9892 switch (switch_cfg) { 9893 case SWITCH_CFG_1G: 9894 bp->port.phy_addr = REG_RD( 9895 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 9896 break; 9897 case SWITCH_CFG_10G: 9898 bp->port.phy_addr = REG_RD( 9899 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 9900 break; 9901 default: 9902 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 9903 bp->port.link_config[0]); 9904 return; 9905 } 9906 } 9907 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 9908 /* mask what we support according to speed_cap_mask per configuration */ 9909 for (idx = 0; idx < cfg_size; idx++) { 9910 if (!(bp->link_params.speed_cap_mask[idx] & 9911 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 9912 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 9913 9914 if (!(bp->link_params.speed_cap_mask[idx] & 9915 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 9916 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 9917 9918 if (!(bp->link_params.speed_cap_mask[idx] & 9919 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 9920 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 9921 9922 if (!(bp->link_params.speed_cap_mask[idx] & 9923 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 9924 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 9925 9926 if (!(bp->link_params.speed_cap_mask[idx] & 9927 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 9928 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 9929 SUPPORTED_1000baseT_Full); 9930 9931 if (!(bp->link_params.speed_cap_mask[idx] & 9932 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 9933 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 9934 9935 if (!(bp->link_params.speed_cap_mask[idx] & 9936 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 9937 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; 9938 9939 } 9940 9941 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 9942 bp->port.supported[1]); 9943 } 9944 9945 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) 9946 { 9947 u32 link_config, idx, cfg_size = 0; 9948 bp->port.advertising[0] = 0; 9949 bp->port.advertising[1] = 0; 9950 switch (bp->link_params.num_phys) { 9951 case 1: 9952 case 2: 9953 cfg_size = 1; 9954 break; 9955 case 3: 9956 cfg_size = 2; 9957 break; 9958 } 9959 for (idx = 0; idx < cfg_size; idx++) { 9960 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 9961 link_config = bp->port.link_config[idx]; 9962 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 9963 case PORT_FEATURE_LINK_SPEED_AUTO: 9964 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 9965 bp->link_params.req_line_speed[idx] = 9966 SPEED_AUTO_NEG; 9967 bp->port.advertising[idx] |= 9968 bp->port.supported[idx]; 9969 if (bp->link_params.phy[EXT_PHY1].type == 9970 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 9971 bp->port.advertising[idx] |= 9972 (SUPPORTED_100baseT_Half | 9973 SUPPORTED_100baseT_Full); 9974 } else { 9975 /* force 10G, no AN */ 9976 bp->link_params.req_line_speed[idx] = 9977 SPEED_10000; 9978 bp->port.advertising[idx] |= 9979 (ADVERTISED_10000baseT_Full | 9980 ADVERTISED_FIBRE); 9981 continue; 9982 } 9983 break; 9984 9985 case PORT_FEATURE_LINK_SPEED_10M_FULL: 9986 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 9987 bp->link_params.req_line_speed[idx] = 9988 SPEED_10; 9989 bp->port.advertising[idx] |= 9990 
(ADVERTISED_10baseT_Full | 9991 ADVERTISED_TP); 9992 } else { 9993 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 9994 link_config, 9995 bp->link_params.speed_cap_mask[idx]); 9996 return; 9997 } 9998 break; 9999 10000 case PORT_FEATURE_LINK_SPEED_10M_HALF: 10001 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 10002 bp->link_params.req_line_speed[idx] = 10003 SPEED_10; 10004 bp->link_params.req_duplex[idx] = 10005 DUPLEX_HALF; 10006 bp->port.advertising[idx] |= 10007 (ADVERTISED_10baseT_Half | 10008 ADVERTISED_TP); 10009 } else { 10010 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10011 link_config, 10012 bp->link_params.speed_cap_mask[idx]); 10013 return; 10014 } 10015 break; 10016 10017 case PORT_FEATURE_LINK_SPEED_100M_FULL: 10018 if (bp->port.supported[idx] & 10019 SUPPORTED_100baseT_Full) { 10020 bp->link_params.req_line_speed[idx] = 10021 SPEED_100; 10022 bp->port.advertising[idx] |= 10023 (ADVERTISED_100baseT_Full | 10024 ADVERTISED_TP); 10025 } else { 10026 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10027 link_config, 10028 bp->link_params.speed_cap_mask[idx]); 10029 return; 10030 } 10031 break; 10032 10033 case PORT_FEATURE_LINK_SPEED_100M_HALF: 10034 if (bp->port.supported[idx] & 10035 SUPPORTED_100baseT_Half) { 10036 bp->link_params.req_line_speed[idx] = 10037 SPEED_100; 10038 bp->link_params.req_duplex[idx] = 10039 DUPLEX_HALF; 10040 bp->port.advertising[idx] |= 10041 (ADVERTISED_100baseT_Half | 10042 ADVERTISED_TP); 10043 } else { 10044 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10045 link_config, 10046 bp->link_params.speed_cap_mask[idx]); 10047 return; 10048 } 10049 break; 10050 10051 case PORT_FEATURE_LINK_SPEED_1G: 10052 if (bp->port.supported[idx] & 10053 SUPPORTED_1000baseT_Full) { 10054 bp->link_params.req_line_speed[idx] = 10055 SPEED_1000; 10056 bp->port.advertising[idx] |= 10057 (ADVERTISED_1000baseT_Full | 10058 ADVERTISED_TP); 10059 } else { 10060 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10061 link_config, 10062 bp->link_params.speed_cap_mask[idx]); 10063 return; 10064 } 10065 break; 10066 10067 case PORT_FEATURE_LINK_SPEED_2_5G: 10068 if (bp->port.supported[idx] & 10069 SUPPORTED_2500baseX_Full) { 10070 bp->link_params.req_line_speed[idx] = 10071 SPEED_2500; 10072 bp->port.advertising[idx] |= 10073 (ADVERTISED_2500baseX_Full | 10074 ADVERTISED_TP); 10075 } else { 10076 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10077 link_config, 10078 bp->link_params.speed_cap_mask[idx]); 10079 return; 10080 } 10081 break; 10082 10083 case PORT_FEATURE_LINK_SPEED_10G_CX4: 10084 if (bp->port.supported[idx] & 10085 SUPPORTED_10000baseT_Full) { 10086 bp->link_params.req_line_speed[idx] = 10087 SPEED_10000; 10088 bp->port.advertising[idx] |= 10089 (ADVERTISED_10000baseT_Full | 10090 ADVERTISED_FIBRE); 10091 } else { 10092 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10093 link_config, 10094 bp->link_params.speed_cap_mask[idx]); 10095 return; 10096 } 10097 break; 10098 case PORT_FEATURE_LINK_SPEED_20G: 10099 bp->link_params.req_line_speed[idx] = SPEED_20000; 10100 10101 break; 10102 default: 10103 BNX2X_ERR("NVRAM config error. 
BAD link speed link_config 0x%x\n", 10104 link_config); 10105 bp->link_params.req_line_speed[idx] = 10106 SPEED_AUTO_NEG; 10107 bp->port.advertising[idx] = 10108 bp->port.supported[idx]; 10109 break; 10110 } 10111 10112 bp->link_params.req_flow_ctrl[idx] = (link_config & 10113 PORT_FEATURE_FLOW_CONTROL_MASK); 10114 if ((bp->link_params.req_flow_ctrl[idx] == 10115 BNX2X_FLOW_CTRL_AUTO) && 10116 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { 10117 bp->link_params.req_flow_ctrl[idx] = 10118 BNX2X_FLOW_CTRL_NONE; 10119 } 10120 10121 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 10122 bp->link_params.req_line_speed[idx], 10123 bp->link_params.req_duplex[idx], 10124 bp->link_params.req_flow_ctrl[idx], 10125 bp->port.advertising[idx]); 10126 } 10127 } 10128 10129 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 10130 { 10131 mac_hi = cpu_to_be16(mac_hi); 10132 mac_lo = cpu_to_be32(mac_lo); 10133 memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); 10134 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); 10135 } 10136 10137 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 10138 { 10139 int port = BP_PORT(bp); 10140 u32 config; 10141 u32 ext_phy_type, ext_phy_config, eee_mode; 10142 10143 bp->link_params.bp = bp; 10144 bp->link_params.port = port; 10145 10146 bp->link_params.lane_config = 10147 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 10148 10149 bp->link_params.speed_cap_mask[0] = 10150 SHMEM_RD(bp, 10151 dev_info.port_hw_config[port].speed_capability_mask); 10152 bp->link_params.speed_cap_mask[1] = 10153 SHMEM_RD(bp, 10154 dev_info.port_hw_config[port].speed_capability_mask2); 10155 bp->port.link_config[0] = 10156 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 10157 10158 bp->port.link_config[1] = 10159 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 10160 10161 bp->link_params.multi_phy_config = 10162 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 10163 /* If the device is capable of WoL, set the default state according 10164 * to the HW 10165 */ 10166 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 10167 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 10168 (config & PORT_FEATURE_WOL_ENABLED)); 10169 10170 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 10171 bp->link_params.lane_config, 10172 bp->link_params.speed_cap_mask[0], 10173 bp->port.link_config[0]); 10174 10175 bp->link_params.switch_cfg = (bp->port.link_config[0] & 10176 PORT_FEATURE_CONNECTED_SWITCH_MASK); 10177 bnx2x_phy_probe(&bp->link_params); 10178 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 10179 10180 bnx2x_link_settings_requested(bp); 10181 10182 /* 10183 * If connected directly, work with the internal PHY, otherwise, work 10184 * with the external PHY 10185 */ 10186 ext_phy_config = 10187 SHMEM_RD(bp, 10188 dev_info.port_hw_config[port].external_phy_config); 10189 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 10190 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 10191 bp->mdio.prtad = bp->port.phy_addr; 10192 10193 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 10194 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 10195 bp->mdio.prtad = 10196 XGXS_EXT_PHY_ADDR(ext_phy_config); 10197 10198 /* 10199 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) 10200 * In MF mode, it is set to cover self test cases 10201 */ 10202 if (IS_MF(bp)) 10203 bp->port.need_hw_lock = 
1; 10204 else 10205 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 10206 bp->common.shmem_base, 10207 bp->common.shmem2_base); 10208 10209 /* Configure link feature according to nvram value */ 10210 eee_mode = (((SHMEM_RD(bp, dev_info. 10211 port_feature_config[port].eee_power_mode)) & 10212 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 10213 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 10214 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 10215 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | 10216 EEE_MODE_ENABLE_LPI | 10217 EEE_MODE_OUTPUT_TIME; 10218 } else { 10219 bp->link_params.eee_mode = 0; 10220 } 10221 } 10222 10223 void bnx2x_get_iscsi_info(struct bnx2x *bp) 10224 { 10225 u32 no_flags = NO_ISCSI_FLAG; 10226 #ifdef BCM_CNIC 10227 int port = BP_PORT(bp); 10228 10229 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10230 drv_lic_key[port].max_iscsi_conn); 10231 10232 /* Get the number of maximum allowed iSCSI connections */ 10233 bp->cnic_eth_dev.max_iscsi_conn = 10234 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 10235 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 10236 10237 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 10238 bp->cnic_eth_dev.max_iscsi_conn); 10239 10240 /* 10241 * If maximum allowed number of connections is zero - 10242 * disable the feature. 10243 */ 10244 if (!bp->cnic_eth_dev.max_iscsi_conn) 10245 bp->flags |= no_flags; 10246 #else 10247 bp->flags |= no_flags; 10248 #endif 10249 } 10250 10251 #ifdef BCM_CNIC 10252 static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 10253 { 10254 /* Port info */ 10255 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10256 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 10257 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10258 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 10259 10260 /* Node info */ 10261 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10262 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 10263 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10264 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10265 } 10266 #endif 10267 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) 10268 { 10269 #ifdef BCM_CNIC 10270 int port = BP_PORT(bp); 10271 int func = BP_ABS_FUNC(bp); 10272 10273 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10274 drv_lic_key[port].max_fcoe_conn); 10275 10276 /* Get the number of maximum allowed FCoE connections */ 10277 bp->cnic_eth_dev.max_fcoe_conn = 10278 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10279 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 10280 10281 /* Read the WWN: */ 10282 if (!IS_MF(bp)) { 10283 /* Port info */ 10284 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10285 SHMEM_RD(bp, 10286 dev_info.port_hw_config[port]. 10287 fcoe_wwn_port_name_upper); 10288 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 10289 SHMEM_RD(bp, 10290 dev_info.port_hw_config[port]. 10291 fcoe_wwn_port_name_lower); 10292 10293 /* Node info */ 10294 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 10295 SHMEM_RD(bp, 10296 dev_info.port_hw_config[port]. 10297 fcoe_wwn_node_name_upper); 10298 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10299 SHMEM_RD(bp, 10300 dev_info.port_hw_config[port]. 10301 fcoe_wwn_node_name_lower); 10302 } else if (!IS_MF_SD(bp)) { 10303 /* 10304 * Read the WWN info only if the FCoE feature is enabled for 10305 * this function. 
10306 		 */
10307 		if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
10308 			bnx2x_get_ext_wwn_info(bp, func);
10309
10310 	} else if (IS_MF_FCOE_SD(bp))
10311 		bnx2x_get_ext_wwn_info(bp, func);
10312
10313 	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10314
10315 	/*
10316 	 * If the maximum allowed number of connections is zero -
10317 	 * disable the feature.
10318 	 */
10319 	if (!bp->cnic_eth_dev.max_fcoe_conn)
10320 		bp->flags |= NO_FCOE_FLAG;
10321 #else
10322 	bp->flags |= NO_FCOE_FLAG;
10323 #endif
10324 }
10325
10326 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
10327 {
10328 	/*
10329 	 * iSCSI may be dynamically disabled, but by reading the
10330 	 * info here the driver can decrease its memory usage if
10331 	 * the feature is disabled for good
10332 	 */
10333 	bnx2x_get_iscsi_info(bp);
10334 	bnx2x_get_fcoe_info(bp);
10335 }
10336
10337 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
10338 {
10339 	u32 val, val2;
10340 	int func = BP_ABS_FUNC(bp);
10341 	int port = BP_PORT(bp);
10342 #ifdef BCM_CNIC
10343 	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10344 	u8 *fip_mac = bp->fip_mac;
10345 #endif
10346
10347 	/* Zero primary MAC configuration */
10348 	memset(bp->dev->dev_addr, 0, ETH_ALEN);
10349
10350 	if (BP_NOMCP(bp)) {
10351 		BNX2X_ERROR("warning: random MAC workaround active\n");
10352 		eth_hw_addr_random(bp->dev);
10353 	} else if (IS_MF(bp)) {
10354 		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10355 		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10356 		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10357 		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10358 			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10359
10360 #ifdef BCM_CNIC
10361 		/*
10362 		 * iSCSI and FCoE NPAR MACs: if either the iSCSI or the
10363 		 * FCoE MAC is missing, the corresponding feature should be
10364 		 * disabled.
10365 		 * In non-SD mode the feature configuration comes from
10366 		 * struct func_ext_config.
10367 		 */
10368 		if (!IS_MF_SD(bp)) {
10369 			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
10370 			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
10371 				val2 = MF_CFG_RD(bp, func_ext_config[func].
10372 						 iscsi_mac_addr_upper);
10373 				val = MF_CFG_RD(bp, func_ext_config[func].
10374 						iscsi_mac_addr_lower);
10375 				bnx2x_set_mac_buf(iscsi_mac, val, val2);
10376 				BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
10377 					       iscsi_mac);
10378 			} else
10379 				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10380
10381 			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
10382 				val2 = MF_CFG_RD(bp, func_ext_config[func].
10383 						 fcoe_mac_addr_upper);
10384 				val = MF_CFG_RD(bp, func_ext_config[func].
10385 fcoe_mac_addr_lower); 10386 bnx2x_set_mac_buf(fip_mac, val, val2); 10387 BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", 10388 fip_mac); 10389 10390 } else 10391 bp->flags |= NO_FCOE_FLAG; 10392 10393 bp->mf_ext_config = cfg; 10394 10395 } else { /* SD MODE */ 10396 if (IS_MF_STORAGE_SD(bp)) { 10397 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10398 /* use primary mac as iscsi mac */ 10399 memcpy(iscsi_mac, bp->dev->dev_addr, 10400 ETH_ALEN); 10401 10402 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 10403 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10404 iscsi_mac); 10405 } else { /* FCoE */ 10406 memcpy(fip_mac, bp->dev->dev_addr, 10407 ETH_ALEN); 10408 BNX2X_DEV_INFO("SD FCoE MODE\n"); 10409 BNX2X_DEV_INFO("Read FIP MAC: %pM\n", 10410 fip_mac); 10411 } 10412 /* Zero primary MAC configuration */ 10413 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10414 } 10415 } 10416 10417 if (IS_MF_FCOE_AFEX(bp)) 10418 /* use FIP MAC as primary MAC */ 10419 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10420 10421 #endif 10422 } else { 10423 /* in SF read MACs from port configuration */ 10424 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 10425 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 10426 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 10427 10428 #ifdef BCM_CNIC 10429 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10430 iscsi_mac_upper); 10431 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10432 iscsi_mac_lower); 10433 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10434 10435 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10436 fcoe_fip_mac_upper); 10437 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10438 fcoe_fip_mac_lower); 10439 bnx2x_set_mac_buf(fip_mac, val, val2); 10440 #endif 10441 } 10442 10443 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 10444 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 10445 10446 #ifdef BCM_CNIC 10447 /* Disable iSCSI if MAC configuration is 10448 * invalid. 10449 */ 10450 if (!is_valid_ether_addr(iscsi_mac)) { 10451 bp->flags |= NO_ISCSI_FLAG; 10452 memset(iscsi_mac, 0, ETH_ALEN); 10453 } 10454 10455 /* Disable FCoE if MAC configuration is 10456 * invalid. 
10457 	 */
10458 	if (!is_valid_ether_addr(fip_mac)) {
10459 		bp->flags |= NO_FCOE_FLAG;
10460 		memset(bp->fip_mac, 0, ETH_ALEN);
10461 	}
10462 #endif
10463
10464 	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
10465 		dev_err(&bp->pdev->dev,
10466 			"bad Ethernet MAC address configuration: %pM\n"
10467 			"change it manually before bringing up the appropriate network interface\n",
10468 			bp->dev->dev_addr);
10469
10470
10471 }
10472
10473 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10474 {
10475 	int /*abs*/func = BP_ABS_FUNC(bp);
10476 	int vn;
10477 	u32 val = 0;
10478 	int rc = 0;
10479
10480 	bnx2x_get_common_hwinfo(bp);
10481
10482 	/*
10483 	 * initialize IGU parameters
10484 	 */
10485 	if (CHIP_IS_E1x(bp)) {
10486 		bp->common.int_block = INT_BLOCK_HC;
10487
10488 		bp->igu_dsb_id = DEF_SB_IGU_ID;
10489 		bp->igu_base_sb = 0;
10490 	} else {
10491 		bp->common.int_block = INT_BLOCK_IGU;
10492
10493 		/* do not allow device reset during IGU info processing */
10494 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10495
10496 		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
10497
10498 		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10499 			int tout = 5000;
10500
10501 			BNX2X_DEV_INFO("FORCING Normal Mode\n");
10502
10503 			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
10504 			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
10505 			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
10506
10507 			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10508 				tout--;
10509 				usleep_range(1000, 2000);
10510 			}
10511
10512 			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10513 				dev_err(&bp->pdev->dev,
10514 					"FORCING Normal Mode failed!!!\n");
10515 				return -EPERM;
10516 			}
10517 		}
10518
10519 		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10520 			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
10521 			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
10522 		} else
10523 			BNX2X_DEV_INFO("IGU Normal Mode\n");
10524
10525 		bnx2x_get_igu_cam_info(bp);
10526
10527 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10528 	}
10529
10530 	/*
10531 	 * set base FW non-default (fast path) status block id, this value is
10532 	 * used to initialize the fw_sb_id saved on the fp/queue structure to
10533 	 * determine the id used by the FW.
10534 	 */
10535 	if (CHIP_IS_E1x(bp))
10536 		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
10537 	else /*
10538 	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
10539 	      * the same queue are indicated on the same IGU SB). So we prefer
10540 	      * FW and IGU SBs to be the same value.
10541 	      */
10542 		bp->base_fw_ndsb = bp->igu_base_sb;
10543
10544 	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
10545 		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
10546 		       bp->igu_sb_cnt, bp->base_fw_ndsb);
10547
10548 	/*
10549 	 * Initialize MF configuration
10550 	 */
10551
10552 	bp->mf_ov = 0;
10553 	bp->mf_mode = 0;
10554 	vn = BP_VN(bp);
10555
10556 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
10557 		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
10558 			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
10559 			       (u32)offsetof(struct shmem2_region, mf_cfg_addr));
10560
10561 		if (SHMEM2_HAS(bp, mf_cfg_addr))
10562 			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
10563 		else
10564 			bp->common.mf_cfg_base = bp->common.shmem_base +
10565 				offsetof(struct shmem_region, func_mb) +
10566 				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
10567 		/*
10568 		 * get mf configuration:
10569 		 * 1. existence of MF configuration
10570 		 * 2.
MAC address must be legal (check only upper bytes) 10571 * for Switch-Independent mode; 10572 * OVLAN must be legal for Switch-Dependent mode 10573 * 3. SF_MODE configures specific MF mode 10574 */ 10575 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 10576 /* get mf configuration */ 10577 val = SHMEM_RD(bp, 10578 dev_info.shared_feature_config.config); 10579 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 10580 10581 switch (val) { 10582 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 10583 val = MF_CFG_RD(bp, func_mf_config[func]. 10584 mac_upper); 10585 /* check for legal mac (upper bytes)*/ 10586 if (val != 0xffff) { 10587 bp->mf_mode = MULTI_FUNCTION_SI; 10588 bp->mf_config[vn] = MF_CFG_RD(bp, 10589 func_mf_config[func].config); 10590 } else 10591 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 10592 break; 10593 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 10594 if ((!CHIP_IS_E1x(bp)) && 10595 (MF_CFG_RD(bp, func_mf_config[func]. 10596 mac_upper) != 0xffff) && 10597 (SHMEM2_HAS(bp, 10598 afex_driver_support))) { 10599 bp->mf_mode = MULTI_FUNCTION_AFEX; 10600 bp->mf_config[vn] = MF_CFG_RD(bp, 10601 func_mf_config[func].config); 10602 } else { 10603 BNX2X_DEV_INFO("can not configure afex mode\n"); 10604 } 10605 break; 10606 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 10607 /* get OV configuration */ 10608 val = MF_CFG_RD(bp, 10609 func_mf_config[FUNC_0].e1hov_tag); 10610 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 10611 10612 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 10613 bp->mf_mode = MULTI_FUNCTION_SD; 10614 bp->mf_config[vn] = MF_CFG_RD(bp, 10615 func_mf_config[func].config); 10616 } else 10617 BNX2X_DEV_INFO("illegal OV for SD\n"); 10618 break; 10619 default: 10620 /* Unknown configuration: reset mf_config */ 10621 bp->mf_config[vn] = 0; 10622 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); 10623 } 10624 } 10625 10626 BNX2X_DEV_INFO("%s function mode\n", 10627 IS_MF(bp) ? 
"multi" : "single"); 10628 10629 switch (bp->mf_mode) { 10630 case MULTI_FUNCTION_SD: 10631 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 10632 FUNC_MF_CFG_E1HOV_TAG_MASK; 10633 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 10634 bp->mf_ov = val; 10635 bp->path_has_ovlan = true; 10636 10637 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", 10638 func, bp->mf_ov, bp->mf_ov); 10639 } else { 10640 dev_err(&bp->pdev->dev, 10641 "No valid MF OV for func %d, aborting\n", 10642 func); 10643 return -EPERM; 10644 } 10645 break; 10646 case MULTI_FUNCTION_AFEX: 10647 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); 10648 break; 10649 case MULTI_FUNCTION_SI: 10650 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 10651 func); 10652 break; 10653 default: 10654 if (vn) { 10655 dev_err(&bp->pdev->dev, 10656 "VN %d is in a single function mode, aborting\n", 10657 vn); 10658 return -EPERM; 10659 } 10660 break; 10661 } 10662 10663 /* check if other port on the path needs ovlan: 10664 * Since MF configuration is shared between ports 10665 * Possible mixed modes are only 10666 * {SF, SI} {SF, SD} {SD, SF} {SI, SF} 10667 */ 10668 if (CHIP_MODE_IS_4_PORT(bp) && 10669 !bp->path_has_ovlan && 10670 !IS_MF(bp) && 10671 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 10672 u8 other_port = !BP_PORT(bp); 10673 u8 other_func = BP_PATH(bp) + 2*other_port; 10674 val = MF_CFG_RD(bp, 10675 func_mf_config[other_func].e1hov_tag); 10676 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 10677 bp->path_has_ovlan = true; 10678 } 10679 } 10680 10681 /* adjust igu_sb_cnt to MF for E1x */ 10682 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 10683 bp->igu_sb_cnt /= E1HVN_MAX; 10684 10685 /* port info */ 10686 bnx2x_get_port_hwinfo(bp); 10687 10688 /* Get MAC addresses */ 10689 bnx2x_get_mac_hwinfo(bp); 10690 10691 bnx2x_get_cnic_info(bp); 10692 10693 return rc; 10694 } 10695 10696 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) 10697 { 10698 int cnt, i, block_end, rodi; 10699 char vpd_start[BNX2X_VPD_LEN+1]; 10700 char str_id_reg[VENDOR_ID_LEN+1]; 10701 char str_id_cap[VENDOR_ID_LEN+1]; 10702 char *vpd_data; 10703 char *vpd_extended_data = NULL; 10704 u8 len; 10705 10706 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 10707 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 10708 10709 if (cnt < BNX2X_VPD_LEN) 10710 goto out_not_found; 10711 10712 /* VPD RO tag should be first tag after identifier string, hence 10713 * we should be able to find it in first BNX2X_VPD_LEN chars 10714 */ 10715 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, 10716 PCI_VPD_LRDT_RO_DATA); 10717 if (i < 0) 10718 goto out_not_found; 10719 10720 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 10721 pci_vpd_lrdt_size(&vpd_start[i]); 10722 10723 i += PCI_VPD_LRDT_TAG_SIZE; 10724 10725 if (block_end > BNX2X_VPD_LEN) { 10726 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 10727 if (vpd_extended_data == NULL) 10728 goto out_not_found; 10729 10730 /* read rest of vpd image into vpd_extended_data */ 10731 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 10732 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 10733 block_end - BNX2X_VPD_LEN, 10734 vpd_extended_data + BNX2X_VPD_LEN); 10735 if (cnt < (block_end - BNX2X_VPD_LEN)) 10736 goto out_not_found; 10737 vpd_data = vpd_extended_data; 10738 } else 10739 vpd_data = vpd_start; 10740 10741 /* now vpd_data holds full vpd content in both cases */ 10742 10743 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 10744 PCI_VPD_RO_KEYWORD_MFR_ID); 10745 if (rodi < 0) 10746 goto out_not_found; 10747 
10748 len = pci_vpd_info_field_size(&vpd_data[rodi]); 10749 10750 if (len != VENDOR_ID_LEN) 10751 goto out_not_found; 10752 10753 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 10754 10755 /* vendor specific info */ 10756 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 10757 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 10758 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 10759 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 10760 10761 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 10762 PCI_VPD_RO_KEYWORD_VENDOR0); 10763 if (rodi >= 0) { 10764 len = pci_vpd_info_field_size(&vpd_data[rodi]); 10765 10766 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 10767 10768 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 10769 memcpy(bp->fw_ver, &vpd_data[rodi], len); 10770 bp->fw_ver[len] = ' '; 10771 } 10772 } 10773 kfree(vpd_extended_data); 10774 return; 10775 } 10776 out_not_found: 10777 kfree(vpd_extended_data); 10778 return; 10779 } 10780 10781 static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) 10782 { 10783 u32 flags = 0; 10784 10785 if (CHIP_REV_IS_FPGA(bp)) 10786 SET_FLAGS(flags, MODE_FPGA); 10787 else if (CHIP_REV_IS_EMUL(bp)) 10788 SET_FLAGS(flags, MODE_EMUL); 10789 else 10790 SET_FLAGS(flags, MODE_ASIC); 10791 10792 if (CHIP_MODE_IS_4_PORT(bp)) 10793 SET_FLAGS(flags, MODE_PORT4); 10794 else 10795 SET_FLAGS(flags, MODE_PORT2); 10796 10797 if (CHIP_IS_E2(bp)) 10798 SET_FLAGS(flags, MODE_E2); 10799 else if (CHIP_IS_E3(bp)) { 10800 SET_FLAGS(flags, MODE_E3); 10801 if (CHIP_REV(bp) == CHIP_REV_Ax) 10802 SET_FLAGS(flags, MODE_E3_A0); 10803 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 10804 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 10805 } 10806 10807 if (IS_MF(bp)) { 10808 SET_FLAGS(flags, MODE_MF); 10809 switch (bp->mf_mode) { 10810 case MULTI_FUNCTION_SD: 10811 SET_FLAGS(flags, MODE_MF_SD); 10812 break; 10813 case MULTI_FUNCTION_SI: 10814 SET_FLAGS(flags, MODE_MF_SI); 10815 break; 10816 case MULTI_FUNCTION_AFEX: 10817 SET_FLAGS(flags, MODE_MF_AFEX); 10818 break; 10819 } 10820 } else 10821 SET_FLAGS(flags, MODE_SF); 10822 10823 #if defined(__LITTLE_ENDIAN) 10824 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 10825 #else /*(__BIG_ENDIAN)*/ 10826 SET_FLAGS(flags, MODE_BIG_ENDIAN); 10827 #endif 10828 INIT_MODE_FLAGS(bp) = flags; 10829 } 10830 10831 static int __devinit bnx2x_init_bp(struct bnx2x *bp) 10832 { 10833 int func; 10834 int rc; 10835 10836 mutex_init(&bp->port.phy_mutex); 10837 mutex_init(&bp->fw_mb_mutex); 10838 spin_lock_init(&bp->stats_lock); 10839 #ifdef BCM_CNIC 10840 mutex_init(&bp->cnic_mutex); 10841 #endif 10842 10843 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 10844 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 10845 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 10846 rc = bnx2x_get_hwinfo(bp); 10847 if (rc) 10848 return rc; 10849 10850 bnx2x_set_modes_bitmap(bp); 10851 10852 rc = bnx2x_alloc_mem_bp(bp); 10853 if (rc) 10854 return rc; 10855 10856 bnx2x_read_fwinfo(bp); 10857 10858 func = BP_FUNC(bp); 10859 10860 /* need to reset chip if undi was active */ 10861 if (!BP_NOMCP(bp)) { 10862 /* init fw_seq */ 10863 bp->fw_seq = 10864 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 10865 DRV_MSG_SEQ_NUMBER_MASK; 10866 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 10867 10868 bnx2x_prev_unload(bp); 10869 } 10870 10871 10872 if (CHIP_REV_IS_FPGA(bp)) 10873 dev_err(&bp->pdev->dev, "FPGA detected\n"); 10874 10875 if (BP_NOMCP(bp) && (func == 0)) 10876 dev_err(&bp->pdev->dev, "MCP disabled, must load 
devices in order!\n");
10877
10878 	bp->disable_tpa = disable_tpa;
10879
10880 #ifdef BCM_CNIC
10881 	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
10882 #endif
10883
10884 	/* Set TPA flags */
10885 	if (bp->disable_tpa) {
10886 		bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
10887 		bp->dev->features &= ~NETIF_F_LRO;
10888 	} else {
10889 		bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
10890 		bp->dev->features |= NETIF_F_LRO;
10891 	}
10892
10893 	if (CHIP_IS_E1(bp))
10894 		bp->dropless_fc = 0;
10895 	else
10896 		bp->dropless_fc = dropless_fc;
10897
10898 	bp->mrrs = mrrs;
10899
10900 	bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
10901
10902 	/* make sure that the numbers are in the right granularity */
10903 	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
10904 	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
10905
10906 	bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
10907
10908 	init_timer(&bp->timer);
10909 	bp->timer.expires = jiffies + bp->current_interval;
10910 	bp->timer.data = (unsigned long) bp;
10911 	bp->timer.function = bnx2x_timer;
10912
10913 	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
10914 	bnx2x_dcbx_init_params(bp);
10915
10916 #ifdef BCM_CNIC
10917 	if (CHIP_IS_E1x(bp))
10918 		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
10919 	else
10920 		bp->cnic_base_cl_id = FP_SB_MAX_E2;
10921 #endif
10922
10923 	/* multiple tx priority */
10924 	if (CHIP_IS_E1x(bp))
10925 		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
10926 	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
10927 		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
10928 	if (CHIP_IS_E3B0(bp))
10929 		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
10930
10931 	return rc;
10932 }
10933
10934
10935 /****************************************************************************
10936 * General service functions
10937 ****************************************************************************/
10938
10939 /*
10940  * net_device service functions
10941  */
10942
10943 /* called with rtnl_lock */
10944 static int bnx2x_open(struct net_device *dev)
10945 {
10946 	struct bnx2x *bp = netdev_priv(dev);
10947 	bool global = false;
10948 	int other_engine = BP_PATH(bp) ? 0 : 1;
10949 	bool other_load_status, load_status;
10950
10951 	bp->stats_init = true;
10952
10953 	netif_carrier_off(dev);
10954
10955 	bnx2x_set_power_state(bp, PCI_D0);
10956
10957 	other_load_status = bnx2x_get_load_status(bp, other_engine);
10958 	load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
10959
10960 	/*
10961 	 * If a parity error happened during the unload, then attentions
10962 	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
10963 	 * want the first function loaded on the current engine to
10964 	 * complete the recovery.
10965 	 */
10966 	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
10967 	    bnx2x_chk_parity_attn(bp, &global, true))
10968 		do {
10969 			/*
10970 			 * If there are attentions and they are in global
10971 			 * blocks, set the GLOBAL_RESET bit regardless of
10972 			 * whether it will be this function that completes
10973 			 * the recovery or not.
10974 			 */
10975 			if (global)
10976 				bnx2x_set_reset_global(bp);
10977
10978 			/*
10979 			 * Only the first function on the current engine should
10980 			 * try to recover in open. In case of attentions in
10981 			 * global blocks only the first in the chip should try
10982 			 * to recover.
10983 			 */
10984 			if ((!load_status &&
10985 			     (!global || !other_load_status)) &&
10986 			    bnx2x_trylock_leader_lock(bp) &&
10987 			    !bnx2x_leader_reset(bp)) {
10988 				netdev_info(bp->dev, "Recovered in open\n");
10989 				break;
10990 			}
10991
10992 			/* recovery has failed...
*/ 10993 bnx2x_set_power_state(bp, PCI_D3hot); 10994 bp->recovery_state = BNX2X_RECOVERY_FAILED; 10995 10996 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" 10997 "If you still see this message after a few retries then power cycle is required.\n"); 10998 10999 return -EAGAIN; 11000 } while (0); 11001 11002 bp->recovery_state = BNX2X_RECOVERY_DONE; 11003 return bnx2x_nic_load(bp, LOAD_OPEN); 11004 } 11005 11006 /* called with rtnl_lock */ 11007 static int bnx2x_close(struct net_device *dev) 11008 { 11009 struct bnx2x *bp = netdev_priv(dev); 11010 11011 /* Unload the driver, release IRQs */ 11012 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 11013 11014 /* Power off */ 11015 bnx2x_set_power_state(bp, PCI_D3hot); 11016 11017 return 0; 11018 } 11019 11020 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 11021 struct bnx2x_mcast_ramrod_params *p) 11022 { 11023 int mc_count = netdev_mc_count(bp->dev); 11024 struct bnx2x_mcast_list_elem *mc_mac = 11025 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); 11026 struct netdev_hw_addr *ha; 11027 11028 if (!mc_mac) 11029 return -ENOMEM; 11030 11031 INIT_LIST_HEAD(&p->mcast_list); 11032 11033 netdev_for_each_mc_addr(ha, bp->dev) { 11034 mc_mac->mac = bnx2x_mc_addr(ha); 11035 list_add_tail(&mc_mac->link, &p->mcast_list); 11036 mc_mac++; 11037 } 11038 11039 p->mcast_list_len = mc_count; 11040 11041 return 0; 11042 } 11043 11044 static void bnx2x_free_mcast_macs_list( 11045 struct bnx2x_mcast_ramrod_params *p) 11046 { 11047 struct bnx2x_mcast_list_elem *mc_mac = 11048 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, 11049 link); 11050 11051 WARN_ON(!mc_mac); 11052 kfree(mc_mac); 11053 } 11054 11055 /** 11056 * bnx2x_set_uc_list - configure a new unicast MACs list. 11057 * 11058 * @bp: driver handle 11059 * 11060 * We will use zero (0) as a MAC type for these MACs. 
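 *
 * Called from bnx2x_set_rx_mode(), which (per its own comment further down)
 * runs under netif_addr_lock_bh() when bp->state is OPEN.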
11061  */
11062 static int bnx2x_set_uc_list(struct bnx2x *bp)
11063 {
11064 	int rc;
11065 	struct net_device *dev = bp->dev;
11066 	struct netdev_hw_addr *ha;
11067 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11068 	unsigned long ramrod_flags = 0;
11069
11070 	/* First schedule a clean-up of the old configuration */
11071 	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11072 	if (rc < 0) {
11073 		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11074 		return rc;
11075 	}
11076
11077 	netdev_for_each_uc_addr(ha, dev) {
11078 		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11079 				       BNX2X_UC_LIST_MAC, &ramrod_flags);
11080 		if (rc == -EEXIST) {
11081 			DP(BNX2X_MSG_SP,
11082 			   "Failed to schedule ADD operations: %d\n", rc);
11083 			/* do not treat adding the same MAC as an error */
11084 			rc = 0;
11085
11086 		} else if (rc < 0) {
11087
11088 			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11089 				  rc);
11090 			return rc;
11091 		}
11092 	}
11093
11094 	/* Execute the pending commands */
11095 	__set_bit(RAMROD_CONT, &ramrod_flags);
11096 	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11097 				 BNX2X_UC_LIST_MAC, &ramrod_flags);
11098 }
11099
/**
 * bnx2x_set_mc_list - configure a new multicast MACs list.
 *
 * @bp: driver handle
 */
11100 static int bnx2x_set_mc_list(struct bnx2x *bp)
11101 {
11102 	struct net_device *dev = bp->dev;
11103 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
11104 	int rc = 0;
11105
11106 	rparam.mcast_obj = &bp->mcast_obj;
11107
11108 	/* first, clear all configured multicast MACs */
11109 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11110 	if (rc < 0) {
11111 		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
11112 		return rc;
11113 	}
11114
11115 	/* then, configure a new MAC list */
11116 	if (netdev_mc_count(dev)) {
11117 		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11118 		if (rc) {
11119 			BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11120 				  rc);
11121 			return rc;
11122 		}
11123
11124 		/* Now add the new MACs */
11125 		rc = bnx2x_config_mcast(bp, &rparam,
11126 					BNX2X_MCAST_CMD_ADD);
11127 		if (rc < 0)
11128 			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11129 				  rc);
11130
11131 		bnx2x_free_mcast_macs_list(&rparam);
11132 	}
11133
11134 	return rc;
11135 }
11136
11137
11138 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
11139 void bnx2x_set_rx_mode(struct net_device *dev)
11140 {
11141 	struct bnx2x *bp = netdev_priv(dev);
11142 	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11143
11144 	if (bp->state != BNX2X_STATE_OPEN) {
11145 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11146 		return;
11147 	}
11148
11149 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
11150
11151 	if (dev->flags & IFF_PROMISC)
11152 		rx_mode = BNX2X_RX_MODE_PROMISC;
11153 	else if ((dev->flags & IFF_ALLMULTI) ||
11154 		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11155 		  CHIP_IS_E1(bp)))
11156 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
11157 	else {
11158 		/* some multicasts */
11159 		if (bnx2x_set_mc_list(bp) < 0)
11160 			rx_mode = BNX2X_RX_MODE_ALLMULTI;
11161
11162 		if (bnx2x_set_uc_list(bp) < 0)
11163 			rx_mode = BNX2X_RX_MODE_PROMISC;
11164 	}
11165
11166 	bp->rx_mode = rx_mode;
11167 #ifdef BCM_CNIC
11168 	/* handle ISCSI SD mode */
11169 	if (IS_MF_ISCSI_SD(bp))
11170 		bp->rx_mode = BNX2X_RX_MODE_NONE;
11171 #endif
11172
11173 	/* Schedule the rx_mode command */
11174 	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11175 		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11176 		return;
11177 	}
11178
11179 	bnx2x_set_storm_rx_mode(bp);
11180 }
11181
11182 /* called with rtnl_lock */
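/* Note: a clause-22 request arrives with devad == MDIO_DEVAD_NONE; the HW
 * still expects a device address, so DEFAULT_PHY_DEV_ADDR is substituted
 * below before the access is issued.
 */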
11183 static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 11184 int devad, u16 addr) 11185 { 11186 struct bnx2x *bp = netdev_priv(netdev); 11187 u16 value; 11188 int rc; 11189 11190 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 11191 prtad, devad, addr); 11192 11193 /* The HW expects different devad if CL22 is used */ 11194 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 11195 11196 bnx2x_acquire_phy_lock(bp); 11197 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); 11198 bnx2x_release_phy_lock(bp); 11199 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 11200 11201 if (!rc) 11202 rc = value; 11203 return rc; 11204 } 11205 11206 /* called with rtnl_lock */ 11207 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, 11208 u16 addr, u16 value) 11209 { 11210 struct bnx2x *bp = netdev_priv(netdev); 11211 int rc; 11212 11213 DP(NETIF_MSG_LINK, 11214 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", 11215 prtad, devad, addr, value); 11216 11217 /* The HW expects different devad if CL22 is used */ 11218 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 11219 11220 bnx2x_acquire_phy_lock(bp); 11221 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); 11222 bnx2x_release_phy_lock(bp); 11223 return rc; 11224 } 11225 11226 /* called with rtnl_lock */ 11227 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 11228 { 11229 struct bnx2x *bp = netdev_priv(dev); 11230 struct mii_ioctl_data *mdio = if_mii(ifr); 11231 11232 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", 11233 mdio->phy_id, mdio->reg_num, mdio->val_in); 11234 11235 if (!netif_running(dev)) 11236 return -EAGAIN; 11237 11238 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 11239 } 11240 11241 #ifdef CONFIG_NET_POLL_CONTROLLER 11242 static void poll_bnx2x(struct net_device *dev) 11243 { 11244 struct bnx2x *bp = netdev_priv(dev); 11245 int i; 11246 11247 for_each_eth_queue(bp, i) { 11248 struct bnx2x_fastpath *fp = &bp->fp[i]; 11249 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 11250 } 11251 } 11252 #endif 11253 11254 static int bnx2x_validate_addr(struct net_device *dev) 11255 { 11256 struct bnx2x *bp = netdev_priv(dev); 11257 11258 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { 11259 BNX2X_ERR("Non-valid Ethernet address\n"); 11260 return -EADDRNOTAVAIL; 11261 } 11262 return 0; 11263 } 11264 11265 static const struct net_device_ops bnx2x_netdev_ops = { 11266 .ndo_open = bnx2x_open, 11267 .ndo_stop = bnx2x_close, 11268 .ndo_start_xmit = bnx2x_start_xmit, 11269 .ndo_select_queue = bnx2x_select_queue, 11270 .ndo_set_rx_mode = bnx2x_set_rx_mode, 11271 .ndo_set_mac_address = bnx2x_change_mac_addr, 11272 .ndo_validate_addr = bnx2x_validate_addr, 11273 .ndo_do_ioctl = bnx2x_ioctl, 11274 .ndo_change_mtu = bnx2x_change_mtu, 11275 .ndo_fix_features = bnx2x_fix_features, 11276 .ndo_set_features = bnx2x_set_features, 11277 .ndo_tx_timeout = bnx2x_tx_timeout, 11278 #ifdef CONFIG_NET_POLL_CONTROLLER 11279 .ndo_poll_controller = poll_bnx2x, 11280 #endif 11281 .ndo_setup_tc = bnx2x_setup_tc, 11282 11283 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 11284 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11285 #endif 11286 }; 11287 11288 static int bnx2x_set_coherency_mask(struct bnx2x *bp) 11289 { 11290 struct device *dev = &bp->pdev->dev; 11291 11292 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 11293 bp->flags |= USING_DAC_FLAG; 11294 if (dma_set_coherent_mask(dev, 
DMA_BIT_MASK(64)) != 0) {
11295 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
11296 return -EIO;
11297 }
11298 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
11299 dev_err(dev, "System does not support DMA, aborting\n");
11300 return -EIO;
11301 }
11302
11303 return 0;
11304 }
11305
11306 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11307 struct net_device *dev,
11308 unsigned long board_type)
11309 {
11310 struct bnx2x *bp;
11311 int rc;
11312 u32 pci_cfg_dword;
11313 bool chip_is_e1x = (board_type == BCM57710 ||
11314 board_type == BCM57711 ||
11315 board_type == BCM57711E);
11316
11317 SET_NETDEV_DEV(dev, &pdev->dev);
11318 bp = netdev_priv(dev);
11319
11320 bp->dev = dev;
11321 bp->pdev = pdev;
11322 bp->flags = 0;
11323
11324 rc = pci_enable_device(pdev);
11325 if (rc) {
11326 dev_err(&bp->pdev->dev,
11327 "Cannot enable PCI device, aborting\n");
11328 goto err_out;
11329 }
11330
11331 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11332 dev_err(&bp->pdev->dev,
11333 "Cannot find PCI device base address, aborting\n");
11334 rc = -ENODEV;
11335 goto err_out_disable;
11336 }
11337
11338 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11339 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
11340 " base address, aborting\n");
11341 rc = -ENODEV;
11342 goto err_out_disable;
11343 }
11344
11345 if (atomic_read(&pdev->enable_cnt) == 1) {
11346 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11347 if (rc) {
11348 dev_err(&bp->pdev->dev,
11349 "Cannot obtain PCI resources, aborting\n");
11350 goto err_out_disable;
11351 }
11352
11353 pci_set_master(pdev);
11354 pci_save_state(pdev);
11355 }
11356
11357 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11358 if (bp->pm_cap == 0) {
11359 dev_err(&bp->pdev->dev,
11360 "Cannot find power management capability, aborting\n");
11361 rc = -EIO;
11362 goto err_out_release;
11363 }
11364
11365 if (!pci_is_pcie(pdev)) {
11366 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
11367 rc = -EIO;
11368 goto err_out_release;
11369 }
11370
11371 rc = bnx2x_set_coherency_mask(bp);
11372 if (rc)
11373 goto err_out_release;
11374
11375 dev->mem_start = pci_resource_start(pdev, 0);
11376 dev->base_addr = dev->mem_start;
11377 dev->mem_end = pci_resource_end(pdev, 0);
11378
11379 dev->irq = pdev->irq;
11380
11381 bp->regview = pci_ioremap_bar(pdev, 0);
11382 if (!bp->regview) {
11383 dev_err(&bp->pdev->dev,
11384 "Cannot map register space, aborting\n");
11385 rc = -ENOMEM;
11386 goto err_out_release;
11387 }
11388
11389 /* In E1/E1H use the PCI device function given by the kernel.
11390 * In E2/E3 read the physical function from the ME register, since
11391 * these chips support Physical Device Assignment where the kernel
11392 * BDF may be arbitrary (depending on the hypervisor).
11393 */
11394 if (chip_is_e1x)
11395 bp->pf_num = PCI_FUNC(pdev->devfn);
11396 else { /* chip is E2/E3 */
11397 pci_read_config_dword(bp->pdev,
11398 PCICFG_ME_REGISTER, &pci_cfg_dword);
11399 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
11400 ME_REG_ABS_PF_NUM_SHIFT);
11401 }
11402 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
11403
11404 bnx2x_set_power_state(bp, PCI_D0);
11405
11406 /* clean indirect addresses */
11407 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11408 PCICFG_VENDOR_ID_OFFSET);
11409 /*
11410 * Clean the following indirect addresses for all functions, since
11411 * they are not used by the driver.
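 * The F0 copies below are cleared on every chip; the F1 copies are
 * cleared only on E1x parts.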
11412 */ 11413 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 11414 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 11415 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 11416 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 11417 11418 if (chip_is_e1x) { 11419 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 11420 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 11421 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 11422 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 11423 } 11424 11425 /* 11426 * Enable internal target-read (in case we are probed after PF FLR). 11427 * Must be done prior to any BAR read access. Only for 57712 and up 11428 */ 11429 if (!chip_is_e1x) 11430 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 11431 11432 dev->watchdog_timeo = TX_TIMEOUT; 11433 11434 dev->netdev_ops = &bnx2x_netdev_ops; 11435 bnx2x_set_ethtool_ops(dev); 11436 11437 dev->priv_flags |= IFF_UNICAST_FLT; 11438 11439 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 11440 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 11441 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 11442 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; 11443 11444 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 11445 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 11446 11447 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; 11448 if (bp->flags & USING_DAC_FLAG) 11449 dev->features |= NETIF_F_HIGHDMA; 11450 11451 /* Add Loopback capability to the device */ 11452 dev->hw_features |= NETIF_F_LOOPBACK; 11453 11454 #ifdef BCM_DCBNL 11455 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 11456 #endif 11457 11458 /* get_port_hwinfo() will set prtad and mmds properly */ 11459 bp->mdio.prtad = MDIO_PRTAD_NONE; 11460 bp->mdio.mmds = 0; 11461 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 11462 bp->mdio.dev = dev; 11463 bp->mdio.mdio_read = bnx2x_mdio_read; 11464 bp->mdio.mdio_write = bnx2x_mdio_write; 11465 11466 return 0; 11467 11468 err_out_release: 11469 if (atomic_read(&pdev->enable_cnt) == 1) 11470 pci_release_regions(pdev); 11471 11472 err_out_disable: 11473 pci_disable_device(pdev); 11474 pci_set_drvdata(pdev, NULL); 11475 11476 err_out: 11477 return rc; 11478 } 11479 11480 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, 11481 int *width, int *speed) 11482 { 11483 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); 11484 11485 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; 11486 11487 /* return value of 1=2.5GHz 2=5GHz */ 11488 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; 11489 } 11490 11491 static int bnx2x_check_firmware(struct bnx2x *bp) 11492 { 11493 const struct firmware *firmware = bp->firmware; 11494 struct bnx2x_fw_file_hdr *fw_hdr; 11495 struct bnx2x_fw_file_section *sections; 11496 u32 offset, len, num_ops; 11497 u16 *ops_offsets; 11498 int i; 11499 const u8 *fw_ver; 11500 11501 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { 11502 BNX2X_ERR("Wrong FW size\n"); 11503 return -EINVAL; 11504 } 11505 11506 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; 11507 sections = (struct bnx2x_fw_file_section *)fw_hdr; 11508 11509 /* Make sure none of the offsets and sizes make us read beyond 11510 * the end of the firmware data */ 11511 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { 11512 offset = be32_to_cpu(sections[i].offset); 11513 len = be32_to_cpu(sections[i].len); 11514 if (offset + len > firmware->size) { 11515 BNX2X_ERR("Section %d length is out of bounds\n", i); 11516 return -EINVAL; 11517 } 11518 } 11519 11520 /* Likewise for the init_ops 
offsets */ 11521 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); 11522 ops_offsets = (u16 *)(firmware->data + offset); 11523 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); 11524 11525 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 11526 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 11527 BNX2X_ERR("Section offset %d is out of bounds\n", i); 11528 return -EINVAL; 11529 } 11530 } 11531 11532 /* Check FW version */ 11533 offset = be32_to_cpu(fw_hdr->fw_version.offset); 11534 fw_ver = firmware->data + offset; 11535 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || 11536 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 11537 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 11538 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 11539 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", 11540 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], 11541 BCM_5710_FW_MAJOR_VERSION, 11542 BCM_5710_FW_MINOR_VERSION, 11543 BCM_5710_FW_REVISION_VERSION, 11544 BCM_5710_FW_ENGINEERING_VERSION); 11545 return -EINVAL; 11546 } 11547 11548 return 0; 11549 } 11550 11551 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 11552 { 11553 const __be32 *source = (const __be32 *)_source; 11554 u32 *target = (u32 *)_target; 11555 u32 i; 11556 11557 for (i = 0; i < n/4; i++) 11558 target[i] = be32_to_cpu(source[i]); 11559 } 11560 11561 /* 11562 Ops array is stored in the following format: 11563 {op(8bit), offset(24bit, big endian), data(32bit, big endian)} 11564 */ 11565 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) 11566 { 11567 const __be32 *source = (const __be32 *)_source; 11568 struct raw_op *target = (struct raw_op *)_target; 11569 u32 i, j, tmp; 11570 11571 for (i = 0, j = 0; i < n/8; i++, j += 2) { 11572 tmp = be32_to_cpu(source[j]); 11573 target[i].op = (tmp >> 24) & 0xff; 11574 target[i].offset = tmp & 0xffffff; 11575 target[i].raw_data = be32_to_cpu(source[j + 1]); 11576 } 11577 } 11578 11579 /* IRO array is stored in the following format: 11580 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 11581 */ 11582 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 11583 { 11584 const __be32 *source = (const __be32 *)_source; 11585 struct iro *target = (struct iro *)_target; 11586 u32 i, j, tmp; 11587 11588 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { 11589 target[i].base = be32_to_cpu(source[j]); 11590 j++; 11591 tmp = be32_to_cpu(source[j]); 11592 target[i].m1 = (tmp >> 16) & 0xffff; 11593 target[i].m2 = tmp & 0xffff; 11594 j++; 11595 tmp = be32_to_cpu(source[j]); 11596 target[i].m3 = (tmp >> 16) & 0xffff; 11597 target[i].size = tmp & 0xffff; 11598 j++; 11599 } 11600 } 11601 11602 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 11603 { 11604 const __be16 *source = (const __be16 *)_source; 11605 u16 *target = (u16 *)_target; 11606 u32 i; 11607 11608 for (i = 0; i < n/2; i++) 11609 target[i] = be16_to_cpu(source[i]); 11610 } 11611 11612 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ 11613 do { \ 11614 u32 len = be32_to_cpu(fw_hdr->arr.len); \ 11615 bp->arr = kmalloc(len, GFP_KERNEL); \ 11616 if (!bp->arr) \ 11617 goto lbl; \ 11618 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 11619 (u8 *)bp->arr, len); \ 11620 } while (0) 11621 11622 static int bnx2x_init_firmware(struct bnx2x *bp) 11623 { 11624 const char *fw_file_name; 11625 struct bnx2x_fw_file_hdr *fw_hdr; 11626 int rc; 11627 11628 if (bp->firmware) 11629 return 0; 11630 11631 if (CHIP_IS_E1(bp)) 11632 fw_file_name = 
FW_FILE_NAME_E1; 11633 else if (CHIP_IS_E1H(bp)) 11634 fw_file_name = FW_FILE_NAME_E1H; 11635 else if (!CHIP_IS_E1x(bp)) 11636 fw_file_name = FW_FILE_NAME_E2; 11637 else { 11638 BNX2X_ERR("Unsupported chip revision\n"); 11639 return -EINVAL; 11640 } 11641 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 11642 11643 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 11644 if (rc) { 11645 BNX2X_ERR("Can't load firmware file %s\n", 11646 fw_file_name); 11647 goto request_firmware_exit; 11648 } 11649 11650 rc = bnx2x_check_firmware(bp); 11651 if (rc) { 11652 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 11653 goto request_firmware_exit; 11654 } 11655 11656 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 11657 11658 /* Initialize the pointers to the init arrays */ 11659 /* Blob */ 11660 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); 11661 11662 /* Opcodes */ 11663 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); 11664 11665 /* Offsets */ 11666 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, 11667 be16_to_cpu_n); 11668 11669 /* STORMs firmware */ 11670 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 11671 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); 11672 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + 11673 be32_to_cpu(fw_hdr->tsem_pram_data.offset); 11674 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + 11675 be32_to_cpu(fw_hdr->usem_int_table_data.offset); 11676 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + 11677 be32_to_cpu(fw_hdr->usem_pram_data.offset); 11678 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 11679 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); 11680 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + 11681 be32_to_cpu(fw_hdr->xsem_pram_data.offset); 11682 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 11683 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 11684 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 11685 be32_to_cpu(fw_hdr->csem_pram_data.offset); 11686 /* IRO */ 11687 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); 11688 11689 return 0; 11690 11691 iro_alloc_err: 11692 kfree(bp->init_ops_offsets); 11693 init_offsets_alloc_err: 11694 kfree(bp->init_ops); 11695 init_ops_alloc_err: 11696 kfree(bp->init_data); 11697 request_firmware_exit: 11698 release_firmware(bp->firmware); 11699 bp->firmware = NULL; 11700 11701 return rc; 11702 } 11703 11704 static void bnx2x_release_firmware(struct bnx2x *bp) 11705 { 11706 kfree(bp->init_ops_offsets); 11707 kfree(bp->init_ops); 11708 kfree(bp->init_data); 11709 release_firmware(bp->firmware); 11710 bp->firmware = NULL; 11711 } 11712 11713 11714 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { 11715 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 11716 .init_hw_cmn = bnx2x_init_hw_common, 11717 .init_hw_port = bnx2x_init_hw_port, 11718 .init_hw_func = bnx2x_init_hw_func, 11719 11720 .reset_hw_cmn = bnx2x_reset_common, 11721 .reset_hw_port = bnx2x_reset_port, 11722 .reset_hw_func = bnx2x_reset_func, 11723 11724 .gunzip_init = bnx2x_gunzip_init, 11725 .gunzip_end = bnx2x_gunzip_end, 11726 11727 .init_fw = bnx2x_init_firmware, 11728 .release_fw = bnx2x_release_firmware, 11729 }; 11730 11731 void bnx2x__init_func_obj(struct bnx2x *bp) 11732 { 11733 /* Prepare DMAE related driver resources */ 11734 bnx2x_setup_dmae(bp); 11735 11736 bnx2x_init_func_obj(bp, &bp->func_obj, 11737 bnx2x_sp(bp, func_rdata), 11738 bnx2x_sp_mapping(bp, func_rdata), 11739 bnx2x_sp(bp, func_afex_rdata), 11740 bnx2x_sp_mapping(bp, 
func_afex_rdata),
11741 &bnx2x_func_sp_drv);
11742 }
11743
11744 /* must be called after sriov-enable */
11745 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11746 {
11747 int cid_count = BNX2X_L2_MAX_CID(bp);
11748
11749 #ifdef BCM_CNIC
11750 cid_count += CNIC_CID_MAX;
11751 #endif
11752 return roundup(cid_count, QM_CID_ROUND);
11753 }
11754
11755 /**
11756 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
11757 *
11758 * @pdev: pci device
11759 *
11760 */
11761 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
11762 {
11763 int pos;
11764 u16 control;
11765
11766 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
11767
11768 /*
11769 * If MSI-X is not supported - return the number of SBs needed to
11770 * support one fastpath queue: one FP queue + SB for CNIC
11771 */
11772 if (!pos)
11773 return 1 + CNIC_PRESENT;
11774
11775 /*
11776 * The value in the PCI configuration space is the index of the last
11777 * entry, namely one less than the actual size of the table, which is
11778 * exactly what we want to return from this function: the number of
11779 * all SBs without the default SB.
11780 */
11781 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
11782 return control & PCI_MSIX_FLAGS_QSIZE;
11783 }
11784
11785 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11786 const struct pci_device_id *ent)
11787 {
11788 struct net_device *dev = NULL;
11789 struct bnx2x *bp;
11790 int pcie_width, pcie_speed;
11791 int rc, max_non_def_sbs;
11792 int rx_count, tx_count, rss_count, doorbell_size;
11793 /*
11794 * An estimated maximum supported CoS number according to the chip
11795 * version.
11796 * We will try to roughly estimate the maximum number of CoSes this chip
11797 * may support in order to minimize the memory allocated for Tx
11798 * netdev_queues. This number will be accurately calculated during the
11799 * initialization of bp->max_cos, based on the chip version AND chip
11800 * revision, in bnx2x_init_bp().
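 * (For example, a hypothetical E3 B0 board takes the
 * BNX2X_MULTI_TX_COS_E3B0 branch of the switch below, and may still end
 * up with a smaller bp->max_cos once bnx2x_init_bp() has run.)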
11801 */
11802 u8 max_cos_est = 0;
11803
11804 switch (ent->driver_data) {
11805 case BCM57710:
11806 case BCM57711:
11807 case BCM57711E:
11808 max_cos_est = BNX2X_MULTI_TX_COS_E1X;
11809 break;
11810
11811 case BCM57712:
11812 case BCM57712_MF:
11813 max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
11814 break;
11815
11816 case BCM57800:
11817 case BCM57800_MF:
11818 case BCM57810:
11819 case BCM57810_MF:
11820 case BCM57840_O:
11821 case BCM57840_4_10:
11822 case BCM57840_2_20:
11823 case BCM57840_MFO:
11824 case BCM57840_MF:
11825 case BCM57811:
11826 case BCM57811_MF:
11827 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
11828 break;
11829
11830 default:
11831 pr_err("Unknown board_type (%ld), aborting\n",
11832 ent->driver_data);
11833 return -ENODEV;
11834 }
11835
11836 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
11837
11838 WARN_ON(!max_non_def_sbs);
11839
11840 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
11841 rss_count = max_non_def_sbs - CNIC_PRESENT;
11842
11843 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
11844 rx_count = rss_count + FCOE_PRESENT;
11845
11846 /*
11847 * Maximum number of netdev Tx queues:
11848 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
11849 */
11850 tx_count = rss_count * max_cos_est + FCOE_PRESENT;
11851
11852 /* dev zeroed in init_etherdev */
11853 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
11854 if (!dev)
11855 return -ENOMEM;
11856
11857 bp = netdev_priv(dev);
11858
11859 bp->igu_sb_cnt = max_non_def_sbs;
11860 bp->msg_enable = debug;
11861 pci_set_drvdata(pdev, dev);
11862
11863 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
11864 if (rc < 0) {
11865 free_netdev(dev);
11866 return rc;
11867 }
11868
11869 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
11870
11871 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11872 tx_count, rx_count);
11873
11874 rc = bnx2x_init_bp(bp);
11875 if (rc)
11876 goto init_one_exit;
11877
11878 /*
11879 * Map doorbells here, as we need the real value of bp->max_cos,
11880 * which is initialized in bnx2x_init_bp().
11881 */
11882 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
11883 if (doorbell_size > pci_resource_len(pdev, 2)) {
11884 dev_err(&bp->pdev->dev,
11885 "Cannot map doorbells, bar size too small, aborting\n");
11886 rc = -ENOMEM;
11887 goto init_one_exit;
11888 }
11889 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11890 doorbell_size);
11891 if (!bp->doorbells) {
11892 dev_err(&bp->pdev->dev,
11893 "Cannot map doorbell space, aborting\n");
11894 rc = -ENOMEM;
11895 goto init_one_exit;
11896 }
11897
11898 /* calc qm_cid_count */
11899 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
11900
11901 #ifdef BCM_CNIC
11902 /* disable FCoE L2 queue for E1x */
11903 if (CHIP_IS_E1x(bp))
11904 bp->flags |= NO_FCOE_FLAG;
11905
11906 #endif
11907
11908
11909 /* Set bp->num_queues for MSI-X mode */
11910 bnx2x_set_num_queues(bp);
11911
11912 /* Configure interrupt mode: try to enable MSI-X/MSI if
11913 * needed.
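 * (If MSI-X cannot be enabled, bnx2x_set_int_mode() is expected to fall
 * back to MSI and then to legacy INTx.)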
11914 */ 11915 bnx2x_set_int_mode(bp); 11916 11917 rc = register_netdev(dev); 11918 if (rc) { 11919 dev_err(&pdev->dev, "Cannot register net device\n"); 11920 goto init_one_exit; 11921 } 11922 11923 #ifdef BCM_CNIC 11924 if (!NO_FCOE(bp)) { 11925 /* Add storage MAC address */ 11926 rtnl_lock(); 11927 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 11928 rtnl_unlock(); 11929 } 11930 #endif 11931 11932 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 11933 11934 BNX2X_DEV_INFO( 11935 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 11936 board_info[ent->driver_data].name, 11937 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 11938 pcie_width, 11939 ((!CHIP_IS_E2(bp) && pcie_speed == 2) || 11940 (CHIP_IS_E2(bp) && pcie_speed == 1)) ? 11941 "5GHz (Gen2)" : "2.5GHz", 11942 dev->base_addr, bp->pdev->irq, dev->dev_addr); 11943 11944 return 0; 11945 11946 init_one_exit: 11947 if (bp->regview) 11948 iounmap(bp->regview); 11949 11950 if (bp->doorbells) 11951 iounmap(bp->doorbells); 11952 11953 free_netdev(dev); 11954 11955 if (atomic_read(&pdev->enable_cnt) == 1) 11956 pci_release_regions(pdev); 11957 11958 pci_disable_device(pdev); 11959 pci_set_drvdata(pdev, NULL); 11960 11961 return rc; 11962 } 11963 11964 static void __devexit bnx2x_remove_one(struct pci_dev *pdev) 11965 { 11966 struct net_device *dev = pci_get_drvdata(pdev); 11967 struct bnx2x *bp; 11968 11969 if (!dev) { 11970 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 11971 return; 11972 } 11973 bp = netdev_priv(dev); 11974 11975 #ifdef BCM_CNIC 11976 /* Delete storage MAC address */ 11977 if (!NO_FCOE(bp)) { 11978 rtnl_lock(); 11979 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 11980 rtnl_unlock(); 11981 } 11982 #endif 11983 11984 #ifdef BCM_DCBNL 11985 /* Delete app tlvs from dcbnl */ 11986 bnx2x_dcbnl_update_applist(bp, true); 11987 #endif 11988 11989 unregister_netdev(dev); 11990 11991 /* Power on: we can't let PCI layer write to us while we are in D3 */ 11992 bnx2x_set_power_state(bp, PCI_D0); 11993 11994 /* Disable MSI/MSI-X */ 11995 bnx2x_disable_msi(bp); 11996 11997 /* Power off */ 11998 bnx2x_set_power_state(bp, PCI_D3hot); 11999 12000 /* Make sure RESET task is not scheduled before continuing */ 12001 cancel_delayed_work_sync(&bp->sp_rtnl_task); 12002 12003 if (bp->regview) 12004 iounmap(bp->regview); 12005 12006 if (bp->doorbells) 12007 iounmap(bp->doorbells); 12008 12009 bnx2x_release_firmware(bp); 12010 12011 bnx2x_free_mem_bp(bp); 12012 12013 free_netdev(dev); 12014 12015 if (atomic_read(&pdev->enable_cnt) == 1) 12016 pci_release_regions(pdev); 12017 12018 pci_disable_device(pdev); 12019 pci_set_drvdata(pdev, NULL); 12020 } 12021 12022 static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 12023 { 12024 int i; 12025 12026 bp->state = BNX2X_STATE_ERROR; 12027 12028 bp->rx_mode = BNX2X_RX_MODE_NONE; 12029 12030 #ifdef BCM_CNIC 12031 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 12032 #endif 12033 /* Stop Tx */ 12034 bnx2x_tx_disable(bp); 12035 12036 bnx2x_netif_stop(bp, 0); 12037 /* Delete all NAPI objects */ 12038 bnx2x_del_all_napi(bp); 12039 12040 del_timer_sync(&bp->timer); 12041 12042 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 12043 12044 /* Release IRQs */ 12045 bnx2x_free_irq(bp); 12046 12047 /* Free SKBs, SGEs, TPA pool and driver internals */ 12048 bnx2x_free_skbs(bp); 12049 12050 for_each_rx_queue(bp, i) 12051 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 12052 12053 bnx2x_free_mem(bp); 12054 12055 bp->state = BNX2X_STATE_CLOSED; 12056 12057 
netif_carrier_off(bp->dev);
12058
12059 return 0;
12060 }
12061
12062 static void bnx2x_eeh_recover(struct bnx2x *bp)
12063 {
12064 u32 val;
12065
12066 mutex_init(&bp->port.phy_mutex);
12067
12068
12069 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12070 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12071 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12072 BNX2X_ERR("BAD MCP validity signature\n");
12073 }
12074
12075 /**
12076 * bnx2x_io_error_detected - called when a PCI error is detected
12077 * @pdev: Pointer to PCI device
12078 * @state: The current pci connection state
12079 *
12080 * This function is called after a PCI bus error affecting
12081 * this device has been detected.
12082 */
12083 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12084 pci_channel_state_t state)
12085 {
12086 struct net_device *dev = pci_get_drvdata(pdev);
12087 struct bnx2x *bp = netdev_priv(dev);
12088
12089 rtnl_lock();
12090
12091 netif_device_detach(dev);
12092
12093 if (state == pci_channel_io_perm_failure) {
12094 rtnl_unlock();
12095 return PCI_ERS_RESULT_DISCONNECT;
12096 }
12097
12098 if (netif_running(dev))
12099 bnx2x_eeh_nic_unload(bp);
12100
12101 pci_disable_device(pdev);
12102
12103 rtnl_unlock();
12104
12105 /* Request a slot reset */
12106 return PCI_ERS_RESULT_NEED_RESET;
12107 }
12108
12109 /**
12110 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12111 * @pdev: Pointer to PCI device
12112 *
12113 * Restart the card from scratch, as if from a cold boot.
12114 */
12115 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12116 {
12117 struct net_device *dev = pci_get_drvdata(pdev);
12118 struct bnx2x *bp = netdev_priv(dev);
12119
12120 rtnl_lock();
12121
12122 if (pci_enable_device(pdev)) {
12123 dev_err(&pdev->dev,
12124 "Cannot re-enable PCI device after reset\n");
12125 rtnl_unlock();
12126 return PCI_ERS_RESULT_DISCONNECT;
12127 }
12128
12129 pci_set_master(pdev);
12130 pci_restore_state(pdev);
12131
12132 if (netif_running(dev))
12133 bnx2x_set_power_state(bp, PCI_D0);
12134
12135 rtnl_unlock();
12136
12137 return PCI_ERS_RESULT_RECOVERED;
12138 }
12139
12140 /**
12141 * bnx2x_io_resume - called when traffic can start flowing again
12142 * @pdev: Pointer to PCI device
12143 *
12144 * This callback is called when the error recovery driver tells us that
12145 * it's OK to resume normal operation.
12146 */
12147 static void bnx2x_io_resume(struct pci_dev *pdev)
12148 {
12149 struct net_device *dev = pci_get_drvdata(pdev);
12150 struct bnx2x *bp = netdev_priv(dev);
12151
12152 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12153 netdev_err(bp->dev, "Handling parity error recovery. 
Try again later\n");
12154 return;
12155 }
12156
12157 rtnl_lock();
12158
12159 bnx2x_eeh_recover(bp);
12160
12161 if (netif_running(dev))
12162 bnx2x_nic_load(bp, LOAD_NORMAL);
12163
12164 netif_device_attach(dev);
12165
12166 rtnl_unlock();
12167 }
12168
12169 static const struct pci_error_handlers bnx2x_err_handler = {
12170 .error_detected = bnx2x_io_error_detected,
12171 .slot_reset = bnx2x_io_slot_reset,
12172 .resume = bnx2x_io_resume,
12173 };
12174
12175 static struct pci_driver bnx2x_pci_driver = {
12176 .name = DRV_MODULE_NAME,
12177 .id_table = bnx2x_pci_tbl,
12178 .probe = bnx2x_init_one,
12179 .remove = __devexit_p(bnx2x_remove_one),
12180 .suspend = bnx2x_suspend,
12181 .resume = bnx2x_resume,
12182 .err_handler = &bnx2x_err_handler,
12183 };
12184
12185 static int __init bnx2x_init(void)
12186 {
12187 int ret;
12188
12189 pr_info("%s", version);
12190
12191 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12192 if (bnx2x_wq == NULL) {
12193 pr_err("Cannot create workqueue\n");
12194 return -ENOMEM;
12195 }
12196
12197 ret = pci_register_driver(&bnx2x_pci_driver);
12198 if (ret) {
12199 pr_err("Cannot register driver\n");
12200 destroy_workqueue(bnx2x_wq);
12201 }
12202 return ret;
12203 }
12204
12205 static void __exit bnx2x_cleanup(void)
12206 {
12207 struct list_head *pos, *q;
12208 pci_unregister_driver(&bnx2x_pci_driver);
12209
12210 destroy_workqueue(bnx2x_wq);
12211
12212 /* Free globally allocated resources */
12213 list_for_each_safe(pos, q, &bnx2x_prev_list) {
12214 struct bnx2x_prev_path_list *tmp =
12215 list_entry(pos, struct bnx2x_prev_path_list, list);
12216 list_del(pos);
12217 kfree(tmp);
12218 }
12219 }
12220
12221 void bnx2x_notify_link_changed(struct bnx2x *bp)
12222 {
12223 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
12224 }
12225
12226 module_init(bnx2x_init);
12227 module_exit(bnx2x_cleanup);
12228
12229 #ifdef BCM_CNIC
12230 /**
12231 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
12232 *
12233 * @bp: driver handle
12235 *
12236 * This function will wait until the ramrod completion returns.
12237 * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
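 *
 * RAMROD_COMP_WAIT below makes bnx2x_set_mac_one() block until the MAC
 * ramrod completes. A minimal caller-side sketch (hypothetical usage,
 * not taken from this file):
 *
 *	if (bnx2x_set_iscsi_eth_mac_addr(bp))
 *		goto abort_iscsi_setup;	/* hypothetical label */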
12238 */
12239 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
12240 {
12241 unsigned long ramrod_flags = 0;
12242
12243 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12244 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
12245 &bp->iscsi_l2_mac_obj, true,
12246 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
12247 }
12248
12249 /* count denotes the number of new completions we have seen */
12250 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12251 {
12252 struct eth_spe *spe;
12253 int cxt_index, cxt_offset;
12254
12255 #ifdef BNX2X_STOP_ON_ERROR
12256 if (unlikely(bp->panic))
12257 return;
12258 #endif
12259
12260 spin_lock_bh(&bp->spq_lock);
12261 BUG_ON(bp->cnic_spq_pending < count);
12262 bp->cnic_spq_pending -= count;
12263
12264
12265 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
12266 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
12267 & SPE_HDR_CONN_TYPE) >>
12268 SPE_HDR_CONN_TYPE_SHIFT;
12269 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
12270 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
12271
12272 /* Set validation for the iSCSI L2 client before sending the
12273 * SETUP ramrod
12274 */
12275 if (type == ETH_CONNECTION_TYPE) {
12276 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
12277 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
12278 ILT_PAGE_CIDS;
12279 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
12280 (cxt_index * ILT_PAGE_CIDS);
12281 bnx2x_set_ctx_validation(bp,
12282 &bp->context[cxt_index].
12283 vcxt[cxt_offset].eth,
12284 BNX2X_ISCSI_ETH_CID(bp));
12285 }
12286 }
12287
12288 /*
12289 * There may be no more than 8 L2 and no more than 8 L5 SPEs
12290 * in the air. We also check that the number of outstanding
12291 * COMMON ramrods is not more than the EQ and SPQ can
12292 * accommodate.
12293 */
12294 if (type == ETH_CONNECTION_TYPE) {
12295 if (!atomic_read(&bp->cq_spq_left))
12296 break;
12297 else
12298 atomic_dec(&bp->cq_spq_left);
12299 } else if (type == NONE_CONNECTION_TYPE) {
12300 if (!atomic_read(&bp->eq_spq_left))
12301 break;
12302 else
12303 atomic_dec(&bp->eq_spq_left);
12304 } else if ((type == ISCSI_CONNECTION_TYPE) ||
12305 (type == FCOE_CONNECTION_TYPE)) {
12306 if (bp->cnic_spq_pending >=
12307 bp->cnic_eth_dev.max_kwqe_pending)
12308 break;
12309 else
12310 bp->cnic_spq_pending++;
12311 } else {
12312 BNX2X_ERR("Unknown SPE type: %d\n", type);
12313 bnx2x_panic();
12314 break;
12315 }
12316
12317 spe = bnx2x_sp_get_next(bp);
12318 *spe = *bp->cnic_kwq_cons;
12319
12320 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
12321 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12322
12323 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12324 bp->cnic_kwq_cons = bp->cnic_kwq;
12325 else
12326 bp->cnic_kwq_cons++;
12327 }
12328 bnx2x_sp_prod_update(bp);
12329 spin_unlock_bh(&bp->spq_lock);
12330 }
12331
12332 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12333 struct kwqe_16 *kwqes[], u32 count)
12334 {
12335 struct bnx2x *bp = netdev_priv(dev);
12336 int i;
12337
12338 #ifdef BNX2X_STOP_ON_ERROR
12339 if (unlikely(bp->panic)) {
12340 BNX2X_ERR("Can't post to SP queue while panic\n");
12341 return -EIO;
12342 }
12343 #endif
12344
12345 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
12346 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
12347 BNX2X_ERR("Handling parity error recovery. 
Try again later\n");
12348 return -EAGAIN;
12349 }
12350
12351 spin_lock_bh(&bp->spq_lock);
12352
12353 for (i = 0; i < count; i++) {
12354 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12355
12356 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12357 break;
12358
12359 *bp->cnic_kwq_prod = *spe;
12360
12361 bp->cnic_kwq_pending++;
12362
12363 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
12364 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12365 spe->data.update_data_addr.hi,
12366 spe->data.update_data_addr.lo,
12367 bp->cnic_kwq_pending);
12368
12369 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12370 bp->cnic_kwq_prod = bp->cnic_kwq;
12371 else
12372 bp->cnic_kwq_prod++;
12373 }
12374
12375 spin_unlock_bh(&bp->spq_lock);
12376
12377 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12378 bnx2x_cnic_sp_post(bp, 0);
12379
12380 return i;
12381 }
12382
12383 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12384 {
12385 struct cnic_ops *c_ops;
12386 int rc = 0;
12387
12388 mutex_lock(&bp->cnic_mutex);
12389 c_ops = rcu_dereference_protected(bp->cnic_ops,
12390 lockdep_is_held(&bp->cnic_mutex));
12391 if (c_ops)
12392 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12393 mutex_unlock(&bp->cnic_mutex);
12394
12395 return rc;
12396 }
12397
12398 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12399 {
12400 struct cnic_ops *c_ops;
12401 int rc = 0;
12402
12403 rcu_read_lock();
12404 c_ops = rcu_dereference(bp->cnic_ops);
12405 if (c_ops)
12406 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12407 rcu_read_unlock();
12408
12409 return rc;
12410 }
12411
12412 /*
12413 * for commands that have no data
12414 */
12415 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12416 {
12417 struct cnic_ctl_info ctl = {0};
12418
12419 ctl.cmd = cmd;
12420
12421 return bnx2x_cnic_ctl_send(bp, &ctl);
12422 }
12423
12424 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
12425 {
12426 struct cnic_ctl_info ctl = {0};
12427
12428 /* first we tell CNIC and only then we count this as a completion */
12429 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12430 ctl.data.comp.cid = cid;
12431 ctl.data.comp.error = err;
12432
12433 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12434 bnx2x_cnic_sp_post(bp, 0);
12435 }
12436
12437
12438 /* Called with netif_addr_lock_bh() taken.
12439 * Sets an rx_mode config for an iSCSI ETH client.
12440 * Doesn't block.
12441 * Completion should be checked outside.
12442 */
12443 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
12444 {
12445 unsigned long accept_flags = 0, ramrod_flags = 0;
12446 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12447 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
12448
12449 if (start) {
12450 /* Start accepting on the iSCSI L2 ring. Accept all multicasts,
12451 * because that is the only way for the UIO queue to accept
12452 * multicasts: in non-promiscuous mode only one queue per
12453 * function will receive multicast packets (the leading queue,
12454 * in our case).
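 * The accept_flags assembled below are either applied immediately via
 * bnx2x_set_q_rx_mode() or, when a previous rx_mode ramrod is still
 * pending, deferred by recording the scheduled state in bp->sp_state.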
12455 */ 12456 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); 12457 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); 12458 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); 12459 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 12460 12461 /* Clear STOP_PENDING bit if START is requested */ 12462 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); 12463 12464 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; 12465 } else 12466 /* Clear START_PENDING bit if STOP is requested */ 12467 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); 12468 12469 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 12470 set_bit(sched_state, &bp->sp_state); 12471 else { 12472 __set_bit(RAMROD_RX, &ramrod_flags); 12473 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, 12474 ramrod_flags); 12475 } 12476 } 12477 12478 12479 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 12480 { 12481 struct bnx2x *bp = netdev_priv(dev); 12482 int rc = 0; 12483 12484 switch (ctl->cmd) { 12485 case DRV_CTL_CTXTBL_WR_CMD: { 12486 u32 index = ctl->data.io.offset; 12487 dma_addr_t addr = ctl->data.io.dma_addr; 12488 12489 bnx2x_ilt_wr(bp, index, addr); 12490 break; 12491 } 12492 12493 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { 12494 int count = ctl->data.credit.credit_count; 12495 12496 bnx2x_cnic_sp_post(bp, count); 12497 break; 12498 } 12499 12500 /* rtnl_lock is held. */ 12501 case DRV_CTL_START_L2_CMD: { 12502 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12503 unsigned long sp_bits = 0; 12504 12505 /* Configure the iSCSI classification object */ 12506 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, 12507 cp->iscsi_l2_client_id, 12508 cp->iscsi_l2_cid, BP_FUNC(bp), 12509 bnx2x_sp(bp, mac_rdata), 12510 bnx2x_sp_mapping(bp, mac_rdata), 12511 BNX2X_FILTER_MAC_PENDING, 12512 &bp->sp_state, BNX2X_OBJ_TYPE_RX, 12513 &bp->macs_pool); 12514 12515 /* Set iSCSI MAC address */ 12516 rc = bnx2x_set_iscsi_eth_mac_addr(bp); 12517 if (rc) 12518 break; 12519 12520 mmiowb(); 12521 barrier(); 12522 12523 /* Start accepting on iSCSI L2 ring */ 12524 12525 netif_addr_lock_bh(dev); 12526 bnx2x_set_iscsi_eth_rx_mode(bp, true); 12527 netif_addr_unlock_bh(dev); 12528 12529 /* bits to wait on */ 12530 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 12531 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); 12532 12533 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 12534 BNX2X_ERR("rx_mode completion timed out!\n"); 12535 12536 break; 12537 } 12538 12539 /* rtnl_lock is held. 
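 * DRV_CTL_STOP_L2_CMD below undoes DRV_CTL_START_L2_CMD: it quiesces
 * the iSCSI L2 ring first and only then deletes the iSCSI MAC.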
*/ 12540 case DRV_CTL_STOP_L2_CMD: { 12541 unsigned long sp_bits = 0; 12542 12543 /* Stop accepting on iSCSI L2 ring */ 12544 netif_addr_lock_bh(dev); 12545 bnx2x_set_iscsi_eth_rx_mode(bp, false); 12546 netif_addr_unlock_bh(dev); 12547 12548 /* bits to wait on */ 12549 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 12550 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); 12551 12552 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 12553 BNX2X_ERR("rx_mode completion timed out!\n"); 12554 12555 mmiowb(); 12556 barrier(); 12557 12558 /* Unset iSCSI L2 MAC */ 12559 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, 12560 BNX2X_ISCSI_ETH_MAC, true); 12561 break; 12562 } 12563 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { 12564 int count = ctl->data.credit.credit_count; 12565 12566 smp_mb__before_atomic_inc(); 12567 atomic_add(count, &bp->cq_spq_left); 12568 smp_mb__after_atomic_inc(); 12569 break; 12570 } 12571 case DRV_CTL_ULP_REGISTER_CMD: { 12572 int ulp_type = ctl->data.register_data.ulp_type; 12573 12574 if (CHIP_IS_E3(bp)) { 12575 int idx = BP_FW_MB_IDX(bp); 12576 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 12577 int path = BP_PATH(bp); 12578 int port = BP_PORT(bp); 12579 int i; 12580 u32 scratch_offset; 12581 u32 *host_addr; 12582 12583 /* first write capability to shmem2 */ 12584 if (ulp_type == CNIC_ULP_ISCSI) 12585 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 12586 else if (ulp_type == CNIC_ULP_FCOE) 12587 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 12588 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 12589 12590 if ((ulp_type != CNIC_ULP_FCOE) || 12591 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || 12592 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) 12593 break; 12594 12595 /* if reached here - should write fcoe capabilities */ 12596 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); 12597 if (!scratch_offset) 12598 break; 12599 scratch_offset += offsetof(struct glob_ncsi_oem_data, 12600 fcoe_features[path][port]); 12601 host_addr = (u32 *) &(ctl->data.register_data. 
12602 fcoe_features); 12603 for (i = 0; i < sizeof(struct fcoe_capabilities); 12604 i += 4) 12605 REG_WR(bp, scratch_offset + i, 12606 *(host_addr + i/4)); 12607 } 12608 break; 12609 } 12610 12611 case DRV_CTL_ULP_UNREGISTER_CMD: { 12612 int ulp_type = ctl->data.ulp_type; 12613 12614 if (CHIP_IS_E3(bp)) { 12615 int idx = BP_FW_MB_IDX(bp); 12616 u32 cap; 12617 12618 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 12619 if (ulp_type == CNIC_ULP_ISCSI) 12620 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 12621 else if (ulp_type == CNIC_ULP_FCOE) 12622 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 12623 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 12624 } 12625 break; 12626 } 12627 12628 default: 12629 BNX2X_ERR("unknown command %x\n", ctl->cmd); 12630 rc = -EINVAL; 12631 } 12632 12633 return rc; 12634 } 12635 12636 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 12637 { 12638 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12639 12640 if (bp->flags & USING_MSIX_FLAG) { 12641 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; 12642 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; 12643 cp->irq_arr[0].vector = bp->msix_table[1].vector; 12644 } else { 12645 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 12646 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 12647 } 12648 if (!CHIP_IS_E1x(bp)) 12649 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; 12650 else 12651 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; 12652 12653 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); 12654 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); 12655 cp->irq_arr[1].status_blk = bp->def_status_blk; 12656 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 12657 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; 12658 12659 cp->num_irq = 2; 12660 } 12661 12662 void bnx2x_setup_cnic_info(struct bnx2x *bp) 12663 { 12664 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12665 12666 12667 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 12668 bnx2x_cid_ilt_lines(bp); 12669 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 12670 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 12671 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 12672 12673 if (NO_ISCSI_OOO(bp)) 12674 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 12675 } 12676 12677 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 12678 void *data) 12679 { 12680 struct bnx2x *bp = netdev_priv(dev); 12681 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12682 12683 if (ops == NULL) { 12684 BNX2X_ERR("NULL ops received\n"); 12685 return -EINVAL; 12686 } 12687 12688 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); 12689 if (!bp->cnic_kwq) 12690 return -ENOMEM; 12691 12692 bp->cnic_kwq_cons = bp->cnic_kwq; 12693 bp->cnic_kwq_prod = bp->cnic_kwq; 12694 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; 12695 12696 bp->cnic_spq_pending = 0; 12697 bp->cnic_kwq_pending = 0; 12698 12699 bp->cnic_data = data; 12700 12701 cp->num_irq = 0; 12702 cp->drv_state |= CNIC_DRV_STATE_REGD; 12703 cp->iro_arr = bp->iro_arr; 12704 12705 bnx2x_setup_cnic_irq_info(bp); 12706 12707 rcu_assign_pointer(bp->cnic_ops, ops); 12708 12709 return 0; 12710 } 12711 12712 static int bnx2x_unregister_cnic(struct net_device *dev) 12713 { 12714 struct bnx2x *bp = netdev_priv(dev); 12715 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12716 12717 mutex_lock(&bp->cnic_mutex); 12718 cp->drv_state = 0; 12719 RCU_INIT_POINTER(bp->cnic_ops, NULL); 12720 mutex_unlock(&bp->cnic_mutex); 12721 synchronize_rcu(); 12722 kfree(bp->cnic_kwq); 12723 bp->cnic_kwq = NULL; 12724 12725 return 0; 12726 } 
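/*
 * A hedged usage sketch for the CNIC hooks exported below; my_cnic_ops,
 * my_priv, kwqes and n are hypothetical caller-owned names, not part of
 * this file:
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, &my_cnic_ops, my_priv)) {
 *		cp->drv_submit_kwqes_16(netdev, kwqes, n);
 *		...
 *		cp->drv_unregister_cnic(netdev);
 *	}
 */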
12727
12728 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12729 {
12730 struct bnx2x *bp = netdev_priv(dev);
12731 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12732
12733 /* If both iSCSI and FCoE are disabled - return NULL in
12734 * order to indicate to CNIC that it should not try to work
12735 * with this device.
12736 */
12737 if (NO_ISCSI(bp) && NO_FCOE(bp))
12738 return NULL;
12739
12740 cp->drv_owner = THIS_MODULE;
12741 cp->chip_id = CHIP_ID(bp);
12742 cp->pdev = bp->pdev;
12743 cp->io_base = bp->regview;
12744 cp->io_base2 = bp->doorbells;
12745 cp->max_kwqe_pending = 8;
12746 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
12747 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12748 bnx2x_cid_ilt_lines(bp);
12749 cp->ctx_tbl_len = CNIC_ILT_LINES;
12750 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12751 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12752 cp->drv_ctl = bnx2x_drv_ctl;
12753 cp->drv_register_cnic = bnx2x_register_cnic;
12754 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12755 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12756 cp->iscsi_l2_client_id =
12757 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12758 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12759
12760 if (NO_ISCSI_OOO(bp))
12761 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
12762
12763 if (NO_ISCSI(bp))
12764 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
12765
12766 if (NO_FCOE(bp))
12767 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
12768
12769 BNX2X_DEV_INFO(
12770 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
12771 cp->ctx_blk_size,
12772 cp->ctx_tbl_offset,
12773 cp->ctx_tbl_len,
12774 cp->starting_cid);
12775 return cp;
12776 }
12777 EXPORT_SYMBOL(bnx2x_cnic_probe);
12778
12779 #endif /* BCM_CNIC */
12780
12781