/*******************************************************************************
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 *
 * Module Description:
 *     This file contains functions dealing with device info, licensing
 *     and bandwidth allocation.
 *
 ******************************************************************************/

#include "lm5710.h"

unsigned long log2_align(unsigned long n);

u64_t lm_get_timestamp_of_recent_cid_recycling(struct _lm_device_t *pdev)
{
    return pdev->vars.last_recycling_timestamp;
}

u32_t lm_get_max_supported_toe_cons(struct _lm_device_t *pdev)
{
    if ( CHK_NULL(pdev) )
    {
        return 0;
    }
    return pdev->params.max_supported_toe_cons;
}

u8_t lm_get_toe_rss_possibility(struct _lm_device_t *pdev)
{
    if ( CHK_NULL(pdev) )
    {
        return 0;
    }
    return (pdev->params.l4_rss_is_possible != L4_RSS_DISABLED);
}

/*******************************************************************************
 * Description:
 *     reads iscsi_boot info block from shmem
 * Return:
 *     lm_status
 ******************************************************************************/
lm_status_t lm_get_iscsi_boot_info_block( struct _lm_device_t *pdev, struct _iscsi_info_block_hdr_t* iscsi_info_block_hdr_ptr )
{
    u32_t      val        = 0;
    u32_t      offset     = 0;
    const u8_t func_mb_id = FUNC_MAILBOX_ID(pdev);

    // Dummy variables that give us a convenient way to resolve the shmem offsets in a debugger.
    // These are pointers, so they don't take up stack space.
    // If we delete these lines we won't have the shmem_region_t symbols.
    shmem_region_t*     shmem_region_dummy    = NULL;
    shmem2_region_t*    shmem2_region_dummy   = NULL;
    shared_hw_cfg_t*    shared_hw_cfg_dummy   = NULL;
    port_hw_cfg_t*      port_hw_cfg_dummy     = NULL;
    shared_feat_cfg_t*  shared_feat_cfg_dummy = NULL;
    port_feat_cfg_t*    port_feat_cfg_dummy   = NULL;
    mf_cfg_t*           mf_cfg_dummy          = NULL;

    UNREFERENCED_PARAMETER_(shmem_region_dummy);
    UNREFERENCED_PARAMETER_(shmem2_region_dummy);
    UNREFERENCED_PARAMETER_(shared_hw_cfg_dummy);
    UNREFERENCED_PARAMETER_(port_hw_cfg_dummy);
    UNREFERENCED_PARAMETER_(shared_feat_cfg_dummy);
    UNREFERENCED_PARAMETER_(port_feat_cfg_dummy);
    UNREFERENCED_PARAMETER_(mf_cfg_dummy);

    if ( CHK_NULL( iscsi_info_block_hdr_ptr ) )
    {
        return LM_STATUS_INVALID_PARAMETER ;
    }

    if (pdev->hw_info.mcp_detected == 1)
    {
        offset = OFFSETOF(shmem_region_t,func_mb[func_mb_id].iscsi_boot_signature);
        LM_SHMEM_READ(pdev, offset, &val );
        iscsi_info_block_hdr_ptr->signature = val ;
        // only for debugging
        offset = OFFSETOF(shmem_region_t,func_mb[func_mb_id].iscsi_boot_block_offset);
        LM_SHMEM_READ(pdev, offset, &val );
        if (val == UEFI_BOOT_SIGNATURE)
        {
            SET_FLAGS(iscsi_info_block_hdr_ptr->boot_flags, BOOT_INFO_FLAGS_UEFI_BOOT );
        }
        else
        {
            RESET_FLAGS(iscsi_info_block_hdr_ptr->boot_flags, BOOT_INFO_FLAGS_UEFI_BOOT );
        }
    }
    else
    {
        // If the MCP is not detected, the shmem is not initialized, so report a zero signature.
        iscsi_info_block_hdr_ptr->signature = 0;
    }
    return LM_STATUS_SUCCESS ;
}

lm_status_t
lm_get_ibft_physical_addr_for_efi(
    struct _lm_device_t *pdev, u32_t *phy_hi, u32_t *phy_lo
    )
{
    u32_t      offset     = 0;
    u32_t      val        = 0;
    const u8_t func_mb_id = FUNC_MAILBOX_ID(pdev);

    if (pdev->hw_info.mcp_detected == 1)
    {
        offset = OFFSETOF(shmem_region_t,func_mb[func_mb_id].iscsi_boot_signature);
        LM_SHMEM_READ(pdev, offset, &val );
        //iscsi_info_block_hdr_ptr->signature = val ;
        // only for debugging
        offset = OFFSETOF(shmem_region_t,func_mb[func_mb_id].iscsi_boot_block_offset);
        LM_SHMEM_READ(pdev, offset, &val );
        if (val == UEFI_BOOT_SIGNATURE)
        {
            offset = OFFSETOF(shmem2_region_t,ibft_host_addr);
            LM_SHMEM2_READ(pdev, offset , &val);
            *phy_lo = val;
            *phy_hi = 0;

            return LM_STATUS_SUCCESS;
        }
    }
    return LM_STATUS_FAILURE;
}

lm_status_t
lm_get_sriov_info(lm_device_t *pdev)
{
    lm_status_t rc = LM_STATUS_SUCCESS;
    u32_t val;
    if (!CHIP_IS_E1x(pdev)) {
        /* get bars... */
#ifdef VF_INVOLVED
        rc = mm_get_sriov_info(pdev, &pdev->hw_info.sriov_info);
        if (rc != LM_STATUS_SUCCESS) {
            return rc;
        }
#endif

#ifdef __LINUX
        lm_set_virt_mode(pdev, DEVICE_TYPE_PF, (pdev->hw_info.sriov_info.total_vfs? VT_BASIC_VF : VT_NONE));
#elif defined(_VBD_CMD_)
        lm_set_virt_mode(pdev, DEVICE_TYPE_PF, (pdev->hw_info.sriov_info.total_vfs?
                                                VT_CHANNEL_VF : VT_NONE));
#endif
        /* Since registers from 0x000-0x7ff are split across functions, each PF will have the same location for the same 4 bits */
        val = REG_RD(pdev, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
        pdev->hw_info.sriov_info.first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) * 8) - E2_MAX_NUM_OF_VFS*PATH_ID(pdev);
        DbgMessage(pdev, WARN, "First VF in PF = %d\n", pdev->hw_info.sriov_info.first_vf_in_pf);
    }
    return rc;
}


static void lm_print_func_info(lm_device_t *pdev)
{
    DbgMessage(pdev, WARN, "lm_get_shmem_info: FUNC_ID: %d\n", FUNC_ID(pdev));
    DbgMessage(pdev, WARN, "lm_get_shmem_info: PCI_FUNC_ID: %d\n", ABS_FUNC_ID(pdev));
    DbgMessage(pdev, WARN, "lm_get_shmem_info: PORT_ID: %d\n", PORT_ID(pdev));

    if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)
    {
        DbgMessage(pdev, WARN, "lm_get_shmem_info: ETH_PORT_ID: %d\n", PATH_ID(pdev) + 2*PORT_ID(pdev));
    }
    else
    {
        DbgMessage(pdev, WARN, "lm_get_shmem_info: ETH_PORT_ID: %d\n", PATH_ID(pdev) + PORT_ID(pdev));
    }

    DbgMessage(pdev, WARN, "lm_get_shmem_info: PATH_ID: %d\n", PATH_ID(pdev));
    DbgMessage(pdev, WARN, "lm_get_shmem_info: VNIC_ID: %d\n", VNIC_ID(pdev));
    DbgMessage(pdev, WARN, "lm_get_shmem_info: FUNC_MAILBOX_ID: %d\n", FUNC_MAILBOX_ID(pdev));
}


/*******************************************************************************
 * Description:
 *
 * Return:
 ******************************************************************************/
lm_status_t
lm_get_function_num(lm_device_t *pdev)
{
    u32_t val = 0;
    /* read the ME register to get the function number. */
    /* ME register: holds the relative-function num + absolute-function num.
     * The absolute-function-num appears only from E2 and above; before that these bits
     * always contained zero, therefore we can't take it as is.
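     *
     * Illustrative example (not taken from the original source): in 4-port mode,
     * if the ME register decodes to pfunc_rel = 2 and path_id = 1, then
     * pfunc_abs = (2 << 1) | 1 = 5; in 2-port mode the same values give
     * pfunc_abs = 2 | 1 = 3, matching the computation below.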
*/ 219 val = REG_RD(pdev, BAR_ME_REGISTER); 220 pdev->params.pfunc_rel = (u8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 221 pdev->params.path_id = (u8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 222 223 if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) 224 { 225 pdev->params.pfunc_abs = (pdev->params.pfunc_rel << 1) | pdev->params.path_id; 226 } 227 else 228 { 229 pdev->params.pfunc_abs = pdev->params.pfunc_rel | pdev->params.path_id; 230 } 231 pdev->params.pfunc_mb_id = FUNC_MAILBOX_ID(pdev); 232 233 DbgMessage(pdev, INFORM , "relative function %d absolute function %d\n", pdev->params.pfunc_rel, pdev->params.pfunc_abs); 234 235 lm_print_func_info(pdev); 236 return LM_STATUS_SUCCESS; 237 } 238 239 240 // reads max_payload_size & max_read_req_size from pci config space 241 lm_status_t lm_get_pcicfg_mps_mrrs(lm_device_t * pdev) 242 { 243 lm_status_t lm_status = LM_STATUS_SUCCESS; 244 u32_t val = 0; 245 246 /* get max payload size and max read size we need it for pxp configuration 247 in the real chip it should be done by the MCP.*/ 248 lm_status = mm_read_pci(pdev, PCICFG_DEVICE_CONTROL, &val); 249 if (lm_status != LM_STATUS_SUCCESS) 250 { 251 return lm_status; 252 } 253 // bit 5-7 254 pdev->hw_info.max_payload_size = (val & 0xe0)>>5; 255 // bit 12-14 256 pdev->hw_info.max_read_req_size = (val & 0x7000)>>12; 257 DbgMessage(pdev, INFORMi, "reg 0xd8 0x%x \n max_payload %d max_read_req %d \n", 258 val,pdev->hw_info.max_payload_size,pdev->hw_info.max_read_req_size); 259 260 return lm_status ; 261 } 262 263 lm_status_t lm_get_pcicfg_info(lm_device_t *pdev) 264 { 265 lm_status_t lm_status; 266 u32_t val; 267 /* Get PCI device and vendor id. (need to be read from parent */ 268 if (IS_PFDEV(pdev) || IS_CHANNEL_VFDEV(pdev)) 269 { 270 lm_status = mm_read_pci(pdev, PCICFG_VENDOR_ID_OFFSET, &val); 271 if (lm_status != LM_STATUS_SUCCESS) 272 { 273 return lm_status; 274 } 275 if (val != 0xFFFFFFFF) 276 { 277 pdev->hw_info.vid = (u16_t) val; 278 pdev->hw_info.did = (u16_t) (val >> 16); 279 } 280 else if (IS_SW_CHANNEL_VIRT_MODE(pdev)) 281 { 282 pdev->hw_info.vid = 0x14E4; 283 pdev->hw_info.did = 0x166F; 284 } 285 DbgMessage(pdev, INFORMi, "vid 0x%x\n", pdev->hw_info.vid); 286 DbgMessage(pdev, INFORMi, "did 0x%x\n", pdev->hw_info.did); 287 } 288 else 289 { 290 DbgMessage(pdev, WARN, "vid&did for VBD VF will be known later\n"); /*Must be known earlier*/ 291 } 292 /* Get subsystem and subvendor id. */ 293 lm_status = mm_read_pci(pdev, PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET, &val); 294 if (lm_status != LM_STATUS_SUCCESS) 295 { 296 return lm_status; 297 } 298 299 pdev->hw_info.svid = (u16_t) val; 300 DbgMessage(pdev, INFORMi, "svid 0x%x\n", pdev->hw_info.svid); 301 pdev->hw_info.ssid = (u16_t) (val >> 16); 302 DbgMessage(pdev, INFORMi, "ssid 0x%x\n", pdev->hw_info.ssid); 303 304 /* Get IRQ, and interrupt pin. */ 305 lm_status = mm_read_pci(pdev, PCICFG_INT_LINE, &val); 306 if (lm_status != LM_STATUS_SUCCESS) 307 { 308 return lm_status; 309 } 310 pdev->hw_info.irq = (u8_t) val; 311 DbgMessage(pdev, INFORMi, "IRQ 0x%x\n", pdev->hw_info.irq); 312 pdev->hw_info.int_pin = (u8_t) (val >> 8); 313 DbgMessage(pdev, INFORMi, "Int pin 0x%x\n", pdev->hw_info.int_pin); 314 315 /* Get cache line size. 
 */
    lm_status = mm_read_pci(pdev, PCICFG_CACHE_LINE_SIZE, &val);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    pdev->hw_info.cache_line_size = (u8_t) val;
    DbgMessage(pdev, INFORMi, "Cache line size 0x%x\n", (u8_t) val);
    pdev->hw_info.latency_timer = (u8_t) (val >> 8);
    DbgMessage(pdev, INFORMi, "Latency timer 0x%x\n", (u8_t) (val >> 8));

    /* Get PCI revision id. */
    lm_status = mm_read_pci(pdev, PCICFG_REVISION_ID_OFFSET, &val);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }
    pdev->hw_info.rev_id = (u8_t) val;
    DbgMessage(pdev, INFORMi, "Revision id 0x%x\n", pdev->hw_info.rev_id);

    /* Get PCI-E speed */
    /* only for PF */
    if (IS_PFDEV(pdev))
    {
        lm_status = mm_read_pci(pdev, PCICFG_LINK_CONTROL, &val);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            return lm_status;
        }

        /* bits 20-25 */
        pdev->hw_info.pcie_lane_width = (val & 0x3f00000) >> 20;
        DbgMessage(pdev, INFORMi, "pcie_lane_width 0x%x\n", pdev->hw_info.pcie_lane_width);
        /* bits 16-19 */
        pdev->hw_info.pcie_lane_speed = (val & 0xf0000) >> 16;
        DbgMessage(pdev, INFORMi, "pcie_lane_speed 0x%x\n", pdev->hw_info.pcie_lane_speed);

        lm_status = lm_get_pcicfg_mps_mrrs(pdev);
    }

    // CQ61532 - Fan Failure test fails when we stop the fan for more than 10 seconds and reboot.
    // Most likely we won't even get this far if the value is bad - we might read other registers
    // before this point that would hang the machine in Windows.
    // Hopefully this check will help other LM drivers as well.
    // Either way, we'll fail the bind in this case...
    if (GET_FLAGS(pdev->hw_info.rev_id,PCICFG_REVESION_ID_MASK) == PCICFG_REVESION_ID_ERROR_VAL)
    {
        return LM_STATUS_FAILURE;
    }

    return lm_status;
}

/**
 * This function reads bar offset from PCI configuration
 * header.
 *
 * @param _pdev
 * @param bar_num Bar index: BAR_0 or BAR_1 or BAR_2
 * @param bar_addr Output value (bar offset).
 *
 * @return LM_STATUS_SUCCESS if bar offset has been read
 *         successfully.
 */
static __inline lm_status_t lm_get_bar_offset_direct(
    IN  struct _lm_device_t * pdev,
    IN  u8_t                  bar_num,   /* Bar index: BAR_0 or BAR_1 or BAR_2 */
    OUT lm_address_t        * bar_addr )
{
    u32_t pci_reg, val;
    lm_status_t lm_status;
    /* Get BARs addresses.
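 * Layout note (illustrative, not from the original source): each BAR is 64 bits wide
 * and occupies two consecutive dwords in PCI configuration space, which is why BAR_1
 * is read at PCICFG_BAR_1_LOW + 8 and BAR_2 at PCICFG_BAR_1_LOW + 16 below, and why
 * the second read at pci_reg + 4 returns the high dword of the selected BAR.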
*/ 386 switch (bar_num) { 387 case BAR_0: 388 pci_reg = PCICFG_BAR_1_LOW; 389 break; 390 case BAR_1: 391 pci_reg = PCICFG_BAR_1_LOW + 8; 392 break; 393 case BAR_2: 394 pci_reg = PCICFG_BAR_1_LOW + 16; 395 break; 396 default: 397 DbgMessage(pdev, FATAL, "Unsupported bar index: %d\n", bar_num); 398 DbgBreakIfAll(1); 399 return LM_STATUS_INVALID_PARAMETER; 400 } 401 lm_status = mm_read_pci(pdev, pci_reg, &val); 402 if(lm_status != LM_STATUS_SUCCESS) { 403 return lm_status; 404 } 405 bar_addr->as_u32.low = val & 0xfffffff0;; 406 DbgMessage(pdev, INFORMi, "BAR %d low 0x%x\n", bar_num, 407 bar_addr->as_u32.low); 408 pci_reg += 4; /* sizeof configuration space bar address register */ 409 lm_status = mm_read_pci(pdev, pci_reg, &val); 410 if(lm_status != LM_STATUS_SUCCESS) { 411 return lm_status; 412 } 413 bar_addr->as_u32.high = val; 414 DbgMessage(pdev, INFORMi, "BAR %d high 0x%x\n", bar_num, 415 bar_addr->as_u32.high); 416 return LM_STATUS_SUCCESS; 417 } 418 419 static __inline lm_status_t lm_get_bar_size_direct ( 420 IN lm_device_t *pdev, 421 IN u8_t bar_num, 422 OUT u32_t * val_p) 423 { 424 u32_t bar_address = 0; 425 u32_t bar_size; 426 switch (bar_num) { 427 case BAR_0: 428 bar_address = GRC_CONFIG_2_SIZE_REG; 429 break; 430 case BAR_1: 431 bar_address = GRC_BAR2_CONFIG; 432 break; 433 case BAR_2: 434 bar_address = GRC_BAR3_CONFIG; 435 break; 436 default: 437 DbgMessage(pdev, FATAL, "Invalid Bar Num\n"); 438 return LM_STATUS_INVALID_PARAMETER; 439 } 440 lm_reg_rd_ind(pdev,PCICFG_OFFSET + bar_address,&bar_size); 441 /*extract only bar size*/ 442 ASSERT_STATIC(PCI_CONFIG_2_BAR1_SIZE == PCI_CONFIG_2_BAR2_SIZE); 443 ASSERT_STATIC(PCI_CONFIG_2_BAR2_SIZE == PCI_CONFIG_2_BAR3_SIZE); 444 445 bar_size = (bar_size & PCI_CONFIG_2_BAR1_SIZE); 446 if (bar_size == 0) 447 { 448 /*bar size disabled*/ 449 return LM_STATUS_FAILURE; 450 } 451 else 452 { 453 /*bit 1 stand for 64K each bit multiply it by two */ 454 *val_p = (0x40 << ((bar_size - 1)))*0x400; 455 } 456 457 return LM_STATUS_SUCCESS; 458 } 459 /* init pdev->hw_info with data from pcicfg */ 460 lm_status_t lm_get_bars_info(lm_device_t *pdev) 461 { 462 lm_status_t lm_status; 463 u32_t bar_map_size = 0; 464 u8_t i; 465 466 /* Get BARs addresses. */ 467 for (i = 0; i < ARRSIZE(pdev->hw_info.mem_base); i++) 468 { 469 lm_status = mm_get_bar_offset(pdev, i, &pdev->hw_info.mem_base[i]); 470 DbgMessage(pdev, INFORMi, "Bar_Offset=0x%x\n", pdev->hw_info.mem_base[i]); 471 472 if(lm_status != LM_STATUS_SUCCESS) 473 { 474 return lm_status; 475 } 476 if(pdev->hw_info.mem_base[i].as_u64 == 0) 477 { 478 DbgMessage(pdev, WARNi, "BAR %d IS NOT PRESENT\n", i); 479 if(i==0) 480 { 481 DbgBreakMsg("BAR 0 must be present\n"); 482 } 483 } 484 } 485 /* TBA: review two intializations done in Teton here (are they needed? are they part of "get_bars_info"): 486 - Enable PCI bus master.... 487 - Configure byte swap and enable write to the reg_window registers 488 */ 489 for (i = 0; i < MAX_NUM_BAR; i++) 490 { 491 if(pdev->hw_info.mem_base[i].as_u64 == 0) 492 { 493 continue; 494 } 495 496 /* get bar i size*/ 497 lm_status = mm_get_bar_size(pdev, i, &(pdev->hw_info.bar_size[i])); 498 499 if ( lm_status != LM_STATUS_SUCCESS ) 500 { 501 return lm_status; 502 } 503 DbgMessage(pdev, INFORMi, "bar %d size 0x%x\n", i, pdev->hw_info.bar_size[i]); 504 /* Change in BAR1 505 * The function will map in case of BAR1 only the ETH cid doorbell space to a virtual address. 506 * (Map from BAR1 base address, to BAR1 base address plus MAX_ETH_CONS* LM_PAGE_SIZE). 
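 * Size note (illustrative, not from the original source): the GRC BAR-size field
 * decoded earlier in lm_get_bar_size_direct as (0x40 << (size - 1)) * 0x400 gives
 * 64KB for a field value of 1, 128KB for 2, 256KB for 3, and so on; the doorbell
 * window mapped below is normally only a small fraction of the full BAR.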
507 */ 508 if (BAR_1 == i ) 509 { 510 if (IS_PFDEV(pdev)) 511 { //TODO Revise it 512 #ifdef VF_INVOLVED 513 bar_map_size = pdev->hw_info.bar_size[i]; 514 #else 515 bar_map_size = LM_DQ_CID_SIZE * MAX_ETH_CONS; 516 #endif 517 } 518 else 519 { 520 bar_map_size = LM_DQ_CID_SIZE; 521 } 522 #ifndef VF_INVOLVED 523 DbgBreakIf(bar_map_size >= pdev->hw_info.bar_size[i]); 524 #endif 525 } 526 else 527 { 528 bar_map_size = pdev->hw_info.bar_size[i]; 529 } 530 /* Map bar i to system address space. If not mapped already. */ 531 if(lm_is_function_after_flr(pdev) || 532 #ifdef VF_INVOLVED 533 lm_is_function_after_flr(PFDEV(pdev)) || 534 #endif 535 (pdev->vars.mapped_bar_addr[i] == NULL)) 536 { 537 pdev->vars.mapped_bar_addr[i] = NULL; 538 pdev->vars.mapped_bar_addr[i] = mm_map_io_base( 539 pdev, 540 pdev->hw_info.mem_base[i], 541 bar_map_size, 542 i); 543 if(pdev->vars.mapped_bar_addr[i] == NULL) 544 { 545 DbgMessage(pdev, FATAL, "bar %d map io failed\n", i); 546 return LM_STATUS_FAILURE; 547 } 548 else 549 { 550 DbgMessage(pdev, INFORMi, "mem_base[%d]=%p size=0x%x\n", i, pdev->vars.mapped_bar_addr[i], pdev->hw_info.bar_size[i]); 551 } 552 } 553 } 554 /* Now that the bars are mapped, we need to enable target read + write and master-enable, 555 * we can't do this before bars are mapped, but we need to do this before we start any chip 556 * initializations... */ 557 #if defined(__LINUX) || defined(_VBD_) 558 if (IS_PFDEV(pdev)) 559 { 560 pdev->hw_info.pcie_caps_offset = mm_get_cap_offset(pdev, PCI_CAP_PCIE); 561 if (pdev->hw_info.pcie_caps_offset != 0 && pdev->hw_info.pcie_caps_offset != 0xFFFFFFFF) 562 { 563 mm_read_pci(pdev, pdev->hw_info.pcie_caps_offset + PCIE_DEV_CAPS, &pdev->hw_info.pcie_dev_capabilities); 564 565 DbgMessage(pdev, WARN,"Device Capability of PCIe caps is %x\n",pdev->hw_info.pcie_dev_capabilities); 566 567 if (pdev->hw_info.pcie_dev_capabilities) 568 { 569 if (pdev->hw_info.pcie_dev_capabilities & PCIE_DEV_CAPS_FLR_CAPABILITY) 570 { 571 pdev->hw_info.flr_capable = TRUE; 572 } 573 else 574 { 575 pdev->hw_info.flr_capable = FALSE; /*Not trusted for PCI_CFG accesible via hypervisor*/ 576 } 577 } 578 else 579 { 580 pdev->hw_info.pci_cfg_trust = PCI_CFG_NOT_TRUSTED; 581 } 582 } 583 else 584 { 585 pdev->hw_info.pci_cfg_trust = PCI_CFG_NOT_TRUSTED; 586 } 587 588 if (!lm_is_function_after_flr(pdev)) 589 { 590 pdev->hw_info.grc_didvid = REG_RD(pdev, (PCICFG_OFFSET + PCICFG_VENDOR_ID_OFFSET)); 591 lm_status = mm_read_pci(pdev, PCICFG_VENDOR_ID_OFFSET, &pdev->hw_info.pci_cfg_didvid); 592 if (lm_status == LM_STATUS_SUCCESS) 593 { 594 if (pdev->hw_info.grc_didvid != pdev->hw_info.pci_cfg_didvid) 595 { 596 pdev->hw_info.flr_capable = TRUE; 597 pdev->params.is_flr = TRUE; 598 } 599 } 600 } 601 } 602 #endif 603 if (lm_is_function_after_flr(pdev)) 604 { 605 u32_t m_e,tr_e,tw_e; 606 u32_t i_cycles; 607 REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 608 for (i_cycles = 0; i_cycles < 1000; i_cycles++) 609 { 610 mm_wait(pdev,999); 611 } 612 tr_e = REG_RD(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ); 613 tw_e = REG_RD(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE); 614 m_e = REG_RD(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 615 DbgMessage(pdev, INFORM, "M:0x%x, TR:0x%x, TW:0x%x\n",m_e,tr_e,tw_e); 616 if (tw_e != 0x1) 617 { 618 DbgBreakMsg("BAR 0 must be present\n"); 619 return LM_STATUS_FAILURE; 620 } 621 } 622 return LM_STATUS_SUCCESS; 623 } 624 625 lm_status_t lm_get_chip_id_and_mode(lm_device_t *pdev) 626 { 627 u32_t val; 628 u32_t chip_rev; 629 630 /* Get the chip 
revision id and number. */
    /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
    val = REG_RD(PFDEV(pdev),MISC_REG_CHIP_NUM);
    CHIP_NUM_SET(pdev->hw_info.chip_id,val);

    /* If the OTP process was done on the device, change the chip number to 57811 */
    val = REG_RD(PFDEV(pdev),MISC_REG_CHIP_TYPE);
    if (val & CHIP_OPT_MISC_DO_BIT)
    {
        switch (pdev->hw_info.chip_id)
        {
        case CHIP_NUM_57810:
            pdev->hw_info.chip_id = CHIP_NUM_57811;
            break;
        case CHIP_NUM_57810_MF:
            pdev->hw_info.chip_id = CHIP_NUM_57811_MF;
            break;
        default:
            DbgMessage(pdev, FATAL, "Un-supported chip id for OTP: %d\n", pdev->hw_info.chip_id);
            DbgBreakIfAll(1);
            return LM_STATUS_FAILURE;
        }
    }

    val = REG_RD(PFDEV(pdev),MISC_REG_CHIP_REV);
    // The chip rev is a real ASIC when it is < 5;
    // when it is > 5, odd values mean FPGA and even values mean emulation.
    chip_rev = (val & 0xF) << CHIP_REV_SHIFT;
    pdev->hw_info.chip_id |= chip_rev;

    if(chip_rev <= CHIP_REV_ASIC_MAX)
    {
        pdev->vars.clk_factor = 1;
    }
    else if(chip_rev & CHIP_REV_SIM_IS_FPGA)
    {
        pdev->vars.clk_factor = LM_FPGA_FACTOR;
        DbgMessage(pdev, INFORMi, "FPGA: forcing MPS from %d to 0.\n", pdev->hw_info.max_payload_size);
        pdev->hw_info.max_payload_size = 0;
    }
    else
    {
        pdev->vars.clk_factor = LM_EMUL_FACTOR;
    }

    val = REG_RD(PFDEV(pdev),MISC_REG_CHIP_METAL);
    pdev->hw_info.chip_id |= (val & 0xff) << 4;
    val = REG_RD(PFDEV(pdev),MISC_REG_BOND_ID);
    pdev->hw_info.chip_id |= (val & 0xf);
    DbgMessage(pdev, INFORMi , "chip id 0x%x\n", pdev->hw_info.chip_id);
    /* Read silent revision */
    val = REG_RD(PFDEV(pdev),MISC_REG_CHIP_TEST_REG);
    pdev->hw_info.silent_chip_rev = (val & 0xff);
    DbgMessage(pdev, INFORMi , "silent chip rev 0x%x\n", pdev->hw_info.silent_chip_rev);
    if (!CHIP_IS_E1x(pdev))
    {
        /* Determine whether we are in 2-port or 4-port mode */
        /* a) read port4mode_en_ovwr[0];
         * b) if 0, read port4mode_en (0 - 2-port; 1 - 4-port);
         * c) if 1, read port4mode_en_ovwr[1] (0 - 2-port; 1 - 4-port);
         */
        val = REG_RD(PFDEV(pdev), MISC_REG_PORT4MODE_EN_OVWR);
        DbgMessage(pdev, WARN, "MISC_REG_PORT4MODE_EN_OVWR = %d\n", val);
        if ((val & 1) == 0)
        {
            val = REG_RD(PFDEV(pdev), MISC_REG_PORT4MODE_EN);
        }
        else
        {
            val = (val >> 1) & 1;
        }
        pdev->hw_info.chip_port_mode = val? LM_CHIP_PORT_MODE_4 : LM_CHIP_PORT_MODE_2;
        DbgMessage(pdev, WARN, "chip_port_mode %s\n", (pdev->hw_info.chip_port_mode == LM_CHIP_PORT_MODE_4 )?
"4_PORT" : "2_PORT"); 703 } 704 else 705 { 706 pdev->hw_info.chip_port_mode = LM_CHIP_PORT_MODE_NONE; /* N/A */ 707 DbgMessage(pdev, WARN, "chip_port_mode NONE\n"); 708 } 709 return LM_STATUS_SUCCESS; 710 } 711 static void lm_get_igu_cam_info(lm_device_t *pdev) 712 { 713 lm_intr_blk_info_t *blk_info = &pdev->hw_info.intr_blk_info; 714 u8_t igu_test_vectors = FALSE; 715 #define IGU_CAM_VFID_MATCH(pdev, igu_fid) (!(igu_fid & IGU_FID_ENCODE_IS_PF) && ((igu_fid & IGU_FID_VF_NUM_MASK) == ABS_VFID(pdev))) 716 #define IGU_CAM_PFID_MATCH(pdev, igu_fid) ((igu_fid & IGU_FID_ENCODE_IS_PF) && ((igu_fid & IGU_FID_PF_NUM_MASK) == FUNC_ID(pdev))) 717 if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) 718 { 719 blk_info->igu_info.igu_sb_cnt = MAX_RSS_CHAINS; 720 blk_info->igu_info.igu_u_sb_offset = 0; 721 if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_2) 722 { 723 blk_info->igu_info.igu_base_sb = VNIC_ID(pdev) * MAX_RSS_CHAINS; 724 blk_info->igu_info.igu_dsb_id = MAX_VNIC_NUM * MAX_RSS_CHAINS + VNIC_ID(pdev); 725 } 726 else 727 { 728 blk_info->igu_info.igu_base_sb = FUNC_ID(pdev) * MAX_RSS_CHAINS; 729 blk_info->igu_info.igu_dsb_id = MAX_VNIC_NUM * MAX_RSS_CHAINS + FUNC_ID(pdev); 730 } 731 } 732 else 733 { 734 u8_t igu_sb_id; 735 u8_t fid; 736 u8_t vec; 737 u8_t vf_id; 738 u32_t val; 739 u8_t current_pf_id = 0; 740 u8_t recent_vf_id = 0xFF; 741 blk_info->igu_info.igu_sb_cnt = 0; 742 blk_info->igu_info.igu_test_sb_cnt = 0; 743 blk_info->igu_info.igu_base_sb = 0xff; 744 for (vf_id = 0; vf_id < E2_MAX_NUM_OF_VFS; vf_id++) 745 { 746 blk_info->igu_info.vf_igu_info[vf_id].igu_base_sb = 0xFF; 747 blk_info->igu_info.vf_igu_info[vf_id].igu_sb_cnt = 0; 748 blk_info->igu_info.vf_igu_info[vf_id].igu_test_sb_cnt = 0; 749 blk_info->igu_info.vf_igu_info[vf_id].igu_test_mode = FALSE; 750 } 751 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++ ) 752 { 753 // mapping CAM; relevant for E2 operating mode only. 754 // [0] - valid. 
755 // [6:1] - vector number; 756 // [13:7] - FID (if VF - [13] = 0; [12:7] = VF number; if PF - [13] = 1; [12:9] = 0; [8:7] = PF number); 757 lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev,igu_sb_id); 758 lm_igu_sb->block_dump = val = REG_RD(PFDEV(pdev), IGU_REG_MAPPING_MEMORY + 4*igu_sb_id); 759 DbgMessage(pdev, WARN, "addr:0x%x IGU_CAM[%d]=%x\n",IGU_REG_MAPPING_MEMORY + 4*igu_sb_id, igu_sb_id, val); 760 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 761 { 762 if (!IS_MULTI_VNIC(pdev) && (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_2)) 763 { 764 lm_igu_sb->status = LM_IGU_STATUS_AVAILABLE; 765 } 766 else if (current_pf_id == FUNC_ID(pdev)) 767 { 768 lm_igu_sb->status = LM_IGU_STATUS_AVAILABLE; 769 } 770 else 771 { 772 lm_igu_sb->status = 0; 773 } 774 continue; 775 } 776 else 777 { 778 lm_igu_sb->status = LM_IGU_STATUS_VALID; 779 } 780 fid = (val & IGU_REG_MAPPING_MEMORY_FID_MASK) >> IGU_REG_MAPPING_MEMORY_FID_SHIFT; 781 if (fid & IGU_FID_ENCODE_IS_PF) 782 { 783 current_pf_id = lm_igu_sb->pf_number = fid & IGU_FID_PF_NUM_MASK; 784 if (lm_igu_sb->pf_number == FUNC_ID(pdev)) 785 { 786 lm_igu_sb->status |= (LM_IGU_STATUS_AVAILABLE | LM_IGU_STATUS_PF); 787 } 788 else 789 { 790 lm_igu_sb->status |= LM_IGU_STATUS_PF; 791 } 792 } 793 else 794 { 795 lm_igu_sb->vf_number = fid & IGU_FID_VF_NUM_MASK; 796 if ((lm_igu_sb->vf_number >= pdev->hw_info.sriov_info.first_vf_in_pf) 797 && (lm_igu_sb->vf_number < (pdev->hw_info.sriov_info.first_vf_in_pf + pdev->hw_info.sriov_info.total_vfs))) 798 { 799 lm_igu_sb->status |= LM_IGU_STATUS_AVAILABLE; 800 } 801 } 802 lm_igu_sb->vector_number = (val & IGU_REG_MAPPING_MEMORY_VECTOR_MASK) >> IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT; 803 DbgMessage(pdev, VERBOSEi, "FID[%d]=%d\n", igu_sb_id, fid); 804 if ((IS_PFDEV(pdev) && IGU_CAM_PFID_MATCH(pdev, fid)) || 805 (IS_VFDEV(pdev) && IGU_CAM_VFID_MATCH(pdev, fid))) 806 { 807 vec = (val & IGU_REG_MAPPING_MEMORY_VECTOR_MASK) >> IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT; 808 DbgMessage(pdev, INFORMi, "VEC[%d]=%d\n", igu_sb_id, vec); 809 if (igu_test_vectors) 810 { 811 blk_info->igu_info.igu_test_sb_cnt++; 812 } 813 else 814 { 815 if (vec == 0 && IS_PFDEV(pdev)) 816 { 817 /* default status block for default segment + attn segment */ 818 blk_info->igu_info.igu_dsb_id = igu_sb_id; 819 } 820 else 821 { 822 if (blk_info->igu_info.igu_base_sb == 0xff) 823 { 824 blk_info->igu_info.igu_base_sb = igu_sb_id; 825 } 826 /* we don't count the default */ 827 blk_info->igu_info.igu_sb_cnt++; 828 } 829 } 830 if (recent_vf_id != 0xFF) 831 { 832 if (!blk_info->igu_info.vf_igu_info[recent_vf_id].igu_test_mode) 833 { 834 DbgMessage(pdev, WARN, "Consecutiveness of IGU for VF%d is broken. My be it's IGU test mode\n",recent_vf_id); 835 } 836 blk_info->igu_info.vf_igu_info[recent_vf_id].igu_test_mode = TRUE; 837 } 838 } 839 else if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) 840 { 841 if (!(fid & IGU_FID_ENCODE_IS_PF)) 842 { 843 vf_id = fid & IGU_FID_VF_NUM_MASK; 844 if (blk_info->igu_info.vf_igu_info[vf_id].igu_base_sb == 0xff) 845 { 846 blk_info->igu_info.vf_igu_info[vf_id].igu_base_sb = igu_sb_id; 847 } 848 /* we don't count the default */ 849 if (recent_vf_id != vf_id) 850 { 851 if (recent_vf_id != 0xFF) 852 { 853 if (!blk_info->igu_info.vf_igu_info[recent_vf_id].igu_test_mode) 854 { 855 DbgMessage(pdev, WARN, "Consecutiveness of IGU for VF%d is broken. 
My be it's IGU test mode\n",recent_vf_id); 856 } 857 blk_info->igu_info.vf_igu_info[recent_vf_id].igu_test_mode = TRUE; 858 } 859 } 860 recent_vf_id = vf_id; 861 if (blk_info->igu_info.vf_igu_info[vf_id].igu_test_mode) 862 { 863 blk_info->igu_info.vf_igu_info[vf_id].igu_test_sb_cnt++; 864 } 865 else 866 { 867 blk_info->igu_info.vf_igu_info[vf_id].igu_sb_cnt++; 868 } 869 } 870 else 871 { 872 if (recent_vf_id != 0xFF) 873 { 874 if (!blk_info->igu_info.vf_igu_info[recent_vf_id].igu_test_mode) 875 { 876 DbgMessage(pdev, WARN, "Consecutiveness of IGU for VF%d is broken. My be it's IGU test mode\n",recent_vf_id); 877 } 878 blk_info->igu_info.vf_igu_info[recent_vf_id].igu_test_mode = TRUE; 879 } 880 } 881 if (blk_info->igu_info.igu_base_sb != 0xff) 882 { 883 /* We've already found our base... but now we don't match... these are now igu-test-vectors */ 884 if (!igu_test_vectors) 885 { 886 DbgMessage(pdev, WARN, "Consecutiveness of IGU is broken. My be it's IGU test mode\n"); 887 } 888 igu_test_vectors = TRUE; //TODO Michals: take care of this!!!e2 igu_test will fail. 889 } 890 } 891 else 892 { 893 /* No Match - belongs to someone else, check if breaks consecutiveness, if so, break at this point 894 * driver doesn't support non-consecutive vectors (EXCEPT Def sb...) */ 895 if (blk_info->igu_info.igu_base_sb != 0xff) 896 { 897 /* We've already found our base... but now we don't match... these are now igu-test-vectors */ 898 if (!igu_test_vectors) { 899 DbgMessage(pdev, WARN, "Consecutiveness of IGU is broken. My be it's IGU test mode\n"); 900 } 901 igu_test_vectors = TRUE; //TODO Michals: take care of this!!!e2 igu_test will fail. 902 } 903 } 904 } 905 // TODO check cam is valid... 906 #ifndef _VBD_ 907 blk_info->igu_info.igu_sb_cnt = min(blk_info->igu_info.igu_sb_cnt, (u8_t)16); 908 #endif 909 /* E2 TODO: if we don't want to separate u/c/ producers in IGU, this line needs to 910 * be removed, and igu_u_offset needs to be set to 'zero' 911 blk_info->igu_info.igu_u_sb_offset = blk_info->igu_info.igu_sb_cnt / 2;*/ 912 DbgMessage(pdev, WARN, "igu_sb_cnt=%d igu_dsb_id=%d igu_base_sb = %d igu_us_sb_offset = %d igu_test_cnt=%d\n", 913 blk_info->igu_info.igu_sb_cnt, blk_info->igu_info.igu_dsb_id, blk_info->igu_info.igu_base_sb, blk_info->igu_info.igu_u_sb_offset, 914 blk_info->igu_info.igu_test_sb_cnt); 915 916 /* CQ61438 - do not show this error message in case of mf mode changed to SF and func >= 2*/ 917 if ((FUNC_ID(pdev) < 2) && (pdev->hw_info.mf_info.mf_mode != SINGLE_FUNCTION)) 918 { 919 if (blk_info->igu_info.igu_sb_cnt < 1) 920 { 921 DbgMessage(pdev, FATAL, "Igu sb cnt is not valid value=%d\n", blk_info->igu_info.igu_sb_cnt); 922 } 923 if (blk_info->igu_info.igu_base_sb == 0xff) 924 { 925 DbgMessage(pdev, FATAL, "Igu base sb is not valid value=%d\n", blk_info->igu_info.igu_base_sb); 926 } 927 } 928 929 #define IGU_MAX_INTA_SB_CNT 31 930 931 /* CQ72933/CQ72546 932 In case we are in INTA mode, we limit the igu count to 31 as we can't handle more than that */ 933 if (pdev->params.b_inta_mode_prvided_by_os && (blk_info->igu_info.igu_sb_cnt > IGU_MAX_INTA_SB_CNT )) 934 { 935 blk_info->igu_info.igu_sb_cnt = IGU_MAX_INTA_SB_CNT ; 936 } 937 } 938 939 DbgMessage(pdev, WARN, "IGU CAM INFO: BASE_SB: %d DSB: %d IGU_SB_CNT: %d\n", blk_info->igu_info.igu_base_sb, blk_info->igu_info.igu_dsb_id, blk_info->igu_info.igu_sb_cnt); 940 } 941 /* 942 * Assumptions: 943 * - the following are initialized before call to this function: 944 * chip-id, func-rel, 945 */ 946 lm_status_t lm_get_intr_blk_info(lm_device_t *pdev) 
947 { 948 lm_intr_blk_info_t *blk_info = &pdev->hw_info.intr_blk_info; 949 u32_t bar_base; 950 u8_t igu_func_id = 0; 951 952 if (CHIP_IS_E1x(pdev)) 953 { 954 blk_info->blk_type = INTR_BLK_HC; 955 blk_info->access_type = INTR_BLK_ACCESS_GRC; 956 blk_info->blk_mode = INTR_BLK_MODE_NORM; 957 blk_info->simd_addr_womask = HC_REG_COMMAND_REG + PORT_ID(pdev)*32 + COMMAND_REG_SIMD_NOMASK; 958 /* The next part is tricky... and has to do with an emulation work-around for handling interrupts, in which 959 * we want to read without mask - always... so we take care of it here, instead of changing different ums to 960 * call approriate function */ 961 if (CHIP_REV_IS_EMUL(pdev)) 962 { 963 blk_info->simd_addr_wmask = HC_REG_COMMAND_REG + PORT_ID(pdev)*32 + COMMAND_REG_SIMD_NOMASK; 964 } 965 else 966 { 967 blk_info->simd_addr_wmask = HC_REG_COMMAND_REG + PORT_ID(pdev)*32 + COMMAND_REG_SIMD_MASK; 968 } 969 } 970 else 971 { 972 /* If we have more than 32 status blocks we'll need to read from IGU_REG_SISR_MDPC_WMASK_UPPER */ 973 ASSERT_STATIC(MAX_RSS_CHAINS <= 32); 974 pdev->hw_info.intr_blk_info.blk_type = INTR_BLK_IGU; 975 if (REG_RD(PFDEV(pdev), IGU_REG_BLOCK_CONFIGURATION) & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) 976 { 977 DbgMessage(pdev, FATAL, "IGU Backward Compatible Mode\n"); 978 blk_info->blk_mode = INTR_BLK_MODE_BC; 979 } 980 else 981 { 982 DbgMessage(pdev, WARN, "IGU Normal Mode\n"); 983 blk_info->blk_mode = INTR_BLK_MODE_NORM; 984 } 985 /* read CAM to get igu info (must be called after we know if we're in backward compatible mode or not )*/ 986 lm_get_igu_cam_info(pdev); 987 988 igu_func_id = (1 << IGU_FID_ENCODE_IS_PF_SHIFT) | FUNC_ID(pdev); 989 blk_info->igu_info.igu_func_id = igu_func_id; 990 if (pdev->params.igu_access_mode == INTR_BLK_ACCESS_GRC) 991 { 992 DbgMessage(pdev, FATAL, "IGU - GRC\n"); 993 if (IS_VFDEV(pdev)) 994 { 995 DbgBreakMsg("VF Can't work in GRC Access mode!\n"); 996 return LM_STATUS_FAILURE; 997 } 998 blk_info->access_type = INTR_BLK_ACCESS_GRC; 999 /* [18:12] - FID (if VF - [18] = 0; [17:12] = VF number; if PF - [18] = 1; [17:14] = 0; [13:12] = PF number) */ 1000 blk_info->cmd_ctrl_rd_womask = 1001 ((IGU_REG_SISR_MDPC_WOMASK_UPPER << IGU_CTRL_REG_ADDRESS_SHIFT) | 1002 (igu_func_id << IGU_CTRL_REG_FID_SHIFT) | 1003 (IGU_CTRL_CMD_TYPE_RD << IGU_CTRL_REG_TYPE_SHIFT)); 1004 blk_info->simd_addr_womask = IGU_REG_COMMAND_REG_32LSB_DATA; /* this is where data will be after writing ctrol reg... */ 1005 /* The next part is tricky... and has to do with an emulation work-around for handling interrupts, in which 1006 * we want to read without mask - always... so we take care of it here, instead of changing different ums to 1007 * call approriate function */ 1008 if (CHIP_REV_IS_EMUL(pdev)) 1009 { 1010 blk_info->cmd_ctrl_rd_wmask = 1011 ((IGU_REG_SISR_MDPC_WOMASK_UPPER << IGU_CTRL_REG_ADDRESS_SHIFT) | 1012 (igu_func_id << IGU_CTRL_REG_FID_SHIFT) | 1013 (IGU_CTRL_CMD_TYPE_RD << IGU_CTRL_REG_TYPE_SHIFT)); 1014 } 1015 else 1016 { 1017 blk_info->cmd_ctrl_rd_wmask = 1018 ((IGU_REG_SISR_MDPC_WMASK_LSB_UPPER << IGU_CTRL_REG_ADDRESS_SHIFT) | 1019 (igu_func_id << IGU_CTRL_REG_FID_SHIFT) | 1020 (IGU_CTRL_CMD_TYPE_RD << IGU_CTRL_REG_TYPE_SHIFT)); 1021 } 1022 blk_info->simd_addr_wmask = IGU_REG_COMMAND_REG_32LSB_DATA; /* this is where data will be after writing ctrol reg... */ 1023 } 1024 else 1025 { 1026 DbgMessage(pdev, WARN, "IGU - IGUMEM\n"); 1027 blk_info->access_type = INTR_BLK_ACCESS_IGUMEM; 1028 bar_base = IS_PFDEV(pdev)? 
BAR_IGU_INTMEM : VF_BAR0_IGU_OFFSET; 1029 blk_info->simd_addr_womask = bar_base + IGU_REG_SISR_MDPC_WOMASK_UPPER*8; 1030 /* The next part is tricky... and has to do with an emulation work-around for handling interrupts, in which 1031 * we want to read without mask - always... so we take care of it here, instead of changing different ums to 1032 * call approriate function */ 1033 if (CHIP_REV_IS_EMUL(pdev)) 1034 { 1035 blk_info->simd_addr_wmask = bar_base + IGU_REG_SISR_MDPC_WOMASK_UPPER*8; 1036 } 1037 else 1038 { 1039 blk_info->simd_addr_wmask = bar_base + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8; 1040 } 1041 } 1042 } 1043 return LM_STATUS_SUCCESS; 1044 } 1045 1046 lm_status_t lm_get_nvm_info(lm_device_t *pdev) 1047 { 1048 u32_t val = REG_RD(pdev,MCP_REG_MCPR_NVM_CFG4); 1049 1050 pdev->hw_info.flash_spec.total_size = NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE); 1051 pdev->hw_info.flash_spec.page_size = NVRAM_PAGE_SIZE; 1052 1053 return LM_STATUS_SUCCESS; 1054 } 1055 #if defined(DOS) || defined(__LINUX) 1056 /* for ediag + lediat we don't really care about licensing!... */ 1057 #define DEFAULT_CONNECTIONS_TOE 1880 1058 #define MAX_CONNECTIONS 2048 /* Max 32K Connections per port / vnic-per-port (rounded to power2)*/ 1059 #define MAX_CONNECTIONS_ISCSI 128 1060 #define MAX_CONNECTIONS_RDMA 10 1061 #define MAX_CONNECTIONS_TOE 1880 1062 #define MAX_CONNECTIONS_FCOE 0 1063 #define MAX_CONNECTIONS_VF 128 1064 1065 #else 1066 1067 #define MAX_CONNECTIONS (min(16384,(32768 / (log2_align(pdev->hw_info.mf_info.vnics_per_port))))) /* Max 32K Connections per port / vnic-per-port (rounded to power2) 1068 but no more 16K to limit ilt client page size by 64KB*/ 1069 1070 #define DEFAULT_CONNECTIONS_TOE 1880 1071 #define MAX_CONNECTIONS_ISCSI 128 1072 #define MAX_CONNECTIONS_RDMA 10 1073 #define MAX_CONNECTIONS_FCOE 1024 1074 #define MAX_CONNECTIONS_VF (1 << (LM_VF_MAX_RVFID_SIZE + LM_MAX_VF_CID_WND_SIZE + 1)) 1075 #define MAX_CONNECTIONS_TOE (min(8192,MAX_CONNECTIONS - MAX_CONNECTIONS_ISCSI - MAX_CONNECTIONS_RDMA - MAX_CONNECTIONS_FCOE - MAX_ETH_CONS - MAX_CONNECTIONS_VF)) 1076 1077 #endif 1078 1079 1080 #define MAX_CONNECTIONS_TOE_NO_LICENSE 0 1081 #define MAX_CONNECTIONS_ISCSI_NO_LICENSE 0 1082 #define MAX_CONNECTIONS_RDMA_NO_LICENSE 0 1083 #define MAX_CONNECTIONS_FCOE_NO_LICENSE 0 1084 1085 #define MAX_CONNECTIONS_FCOE_NO_MCP 128 1086 1087 static u32_t lm_parse_license_info(u32 val, u8_t is_high) 1088 { 1089 if (is_high) 1090 { 1091 val &=0xFFFF0000; 1092 if(val) 1093 { 1094 val ^= FW_ENCODE_32BIT_PATTERN; 1095 } 1096 val >>= 16; 1097 } 1098 else 1099 { 1100 val &= 0xffff; 1101 if(val) 1102 { 1103 val ^= FW_ENCODE_16BIT_PATTERN; 1104 } 1105 } 1106 return val; 1107 } 1108 1109 static u32_t lm_parse_license_info_bounded(u32 val, u32_t max_cons, u8_t is_high) 1110 { 1111 u32_t license_from_shmem =0; 1112 license_from_shmem = lm_parse_license_info(val, is_high); 1113 1114 val = min(license_from_shmem, max_cons); 1115 return val; 1116 } 1117 /* No special MCP handling for a specific E1H configuration */ 1118 /* WARNING: Do Not Change these defines!!! They are used in an external tcl script that assumes their values!!! 
*/ 1119 #define NO_MCP_WA_CFG_SET_ADDR (0xA0000) 1120 #define NO_MCP_WA_CFG_SET_MAGIC (0x88AA55FF) 1121 #define NO_MCP_WA_MULTI_VNIC_MODE (0xA0004) 1122 #define NO_MCP_WA_VNICS_PER_PORT(port) (0xA0008 + 4*(port)) 1123 #define NO_MCP_WA_OVLAN(func) (0xA0010 + 4*(func)) // --> 0xA0030 1124 #define NO_MCP_WA_FORCE_5710 (0xA0030) 1125 #define NO_MCP_WA_VALID_LIC_ADDR (0xA0040) 1126 #define NO_MCP_WA_VALID_LIC_MAGIC (0xCCAAFFEE) 1127 #define NO_MCP_WA_TOE_LIC (0xA0048) 1128 #define NO_MCP_WA_ISCSI_LIC (0xA0050) 1129 #define NO_MCP_WA_RDMA_LIC (0xA0058) 1130 #define NO_MCP_WA_CLC_SHMEM (0xAF900) 1131 1132 static lm_status_t lm_get_shmem_license_info(lm_device_t *pdev) 1133 { 1134 u32_t max_toe_cons[PORT_MAX] = {0,0}; 1135 u32_t max_rdma_cons[PORT_MAX] = {0,0}; 1136 u32_t max_iscsi_cons[PORT_MAX] = {0,0}; 1137 u32_t max_fcoe_cons[PORT_MAX] = {0,0}; 1138 u32_t max_eth_cons[PORT_MAX] = {0,0}; /* Includes VF connections */ 1139 u32_t max_bar_supported_cons[PORT_MAX] = {0}; 1140 u32_t max_supported_cons[PORT_MAX] = {0}; 1141 u32_t val = 0; 1142 u8_t port = 0; 1143 u32_t offset = 0; 1144 1145 /* Even though only one port actually does the initialization, ALL functions need to know the maximum number of connections 1146 * because that's how they know what the page-size-is, and based on that do per-function initializations as well. */ 1147 pdev->hw_info.max_common_conns = 0; 1148 1149 /* get values for relevant ports. */ 1150 for (port = 0; port < PORT_MAX; port++) 1151 { 1152 if (pdev->hw_info.mcp_detected == 1) 1153 { 1154 LM_SHMEM_READ(pdev, OFFSETOF(shmem_region_t, validity_map[port]),&val); 1155 1156 // check that licensing is enabled 1157 if(GET_FLAGS(val, SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT | SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT)) 1158 { 1159 // align to 32 bit 1160 offset = OFFSETOF(shmem_region_t, drv_lic_key[port].max_toe_conn) & 0xfffffffc; 1161 LM_SHMEM_READ(pdev, offset, &val); 1162 max_toe_cons[port] = lm_parse_license_info_bounded(val, MAX_CONNECTIONS_TOE,FALSE); 1163 DbgMessage(pdev, INFORMi, "max_toe_conn from shmem %d for port %d\n",val, port); 1164 /* RDMA */ 1165 offset = OFFSETOF(shmem_region_t, drv_lic_key[port].max_um_rdma_conn) & 0xfffffffc; 1166 LM_SHMEM_READ(pdev, offset, &val); 1167 max_rdma_cons[port] = lm_parse_license_info_bounded(val, MAX_CONNECTIONS_RDMA,FALSE); 1168 DbgMessage(pdev, INFORMi, "max_rdma_conn from shmem %d for port %d\n",val, port); 1169 /* ISCSI */ 1170 offset = OFFSETOF(shmem_region_t, drv_lic_key[port].max_iscsi_trgt_conn) & 0xfffffffc; 1171 LM_SHMEM_READ(pdev, offset, &val); 1172 max_iscsi_cons[port] = lm_parse_license_info_bounded(val, MAX_CONNECTIONS_ISCSI,TRUE); 1173 DbgMessage(pdev, INFORMi, "max_iscsi_conn from shmem %d for port %d\n",val, port); 1174 /* FCOE */ 1175 offset = OFFSETOF(shmem_region_t, drv_lic_key[port].max_fcoe_init_conn) & 0xfffffffc; 1176 LM_SHMEM_READ(pdev, offset, &val); 1177 if(0 == lm_parse_license_info(val,TRUE)) 1178 { 1179 max_fcoe_cons[port] = 0; 1180 } 1181 else 1182 { 1183 max_fcoe_cons[port] = MAX_CONNECTIONS_FCOE; 1184 } 1185 DbgMessage(pdev, INFORMi, "max_fcoe_conn from shmem %d for port %d\n",val, port); 1186 1187 } 1188 else 1189 { 1190 // In case MCP is enabled and there is no licence => there should be no offload connection. 
                max_toe_cons[port]   = MAX_CONNECTIONS_TOE_NO_LICENSE;
                max_rdma_cons[port]  = MAX_CONNECTIONS_RDMA_NO_LICENSE;
                max_iscsi_cons[port] = MAX_CONNECTIONS_ISCSI_NO_LICENSE;
                max_fcoe_cons[port]  = MAX_CONNECTIONS_FCOE_NO_LICENSE;
            }
            if (CHIP_IS_E1x(pdev))
            {
                max_eth_cons[port] = MAX_ETH_REG_CONS;
            }
            else
            {
                max_eth_cons[port] = MAX_CONNECTIONS_VF;
            }

            /* get the bar size... unless it's the current port, in which case we already have it. Otherwise,
             * read from shmem what the other ports asked for; they could have gotten less, but we're looking
             * at the worst case. */
            if (PORT_ID(pdev) == port)
            {
                max_bar_supported_cons[port] = pdev->hw_info.bar_size[BAR_1] / LM_DQ_CID_SIZE;
            }
            else
            {
                LM_SHMEM_READ(pdev, OFFSETOF(shmem_region_t, dev_info.port_feature_config[port].config), &val);
                val = (val & PORT_FEAT_CFG_BAR2_SIZE_MASK) >> PORT_FEAT_CFG_BAR2_SIZE_SHIFT;
                if (val != 0)
                {
                    /* a field value of 1 stands for 64K; each additional step doubles it */
                    val = (0x40 << ((val - 1)))*0x400;
                }
                max_bar_supported_cons[port] = val / LM_DQ_CID_SIZE;
            }
        }
        else
        {
            // MCP_WA
            LM_SHMEM_READ(pdev, NO_MCP_WA_VALID_LIC_ADDR+4*port, &val);

            if (val == NO_MCP_WA_VALID_LIC_MAGIC)
            {
                LM_SHMEM_READ(pdev, NO_MCP_WA_TOE_LIC+4*port, &val);
                max_toe_cons[port] = val;
                LM_SHMEM_READ(pdev, NO_MCP_WA_ISCSI_LIC+4*port, &val);
                max_iscsi_cons[port] = val;
                LM_SHMEM_READ(pdev, NO_MCP_WA_RDMA_LIC+4*port, &val);
                max_rdma_cons[port] = val;

                /* FCOE */
                // For backward compatibility use the same value; if it is ever required we can add NO_MCP_WA_FCOE_LIC.
                max_fcoe_cons[port] = MAX_CONNECTIONS_FCOE_NO_MCP;
                // FCoE licensing isn't supported.
                /*
                LM_SHMEM_READ(pdev, NO_MCP_WA_FCOE_LIC+4*port, &val);
                max_fcoe_cons[port] = val;
                */
            }
            else
            {
#ifdef VF_INVOLVED
                max_toe_cons[port] = DEFAULT_CONNECTIONS_TOE - 100;
#else
                max_toe_cons[port] = DEFAULT_CONNECTIONS_TOE;
#endif
                max_iscsi_cons[port] = MAX_CONNECTIONS_ISCSI;
                max_rdma_cons[port]  = MAX_CONNECTIONS_RDMA;
                // Need to review this value: it seems we take the max value in this case.
                max_fcoe_cons[port]  = MAX_CONNECTIONS_FCOE_NO_MCP;
            }
            if (CHIP_IS_E1x(pdev))
            {
                max_eth_cons[port] = MAX_ETH_REG_CONS;
            }
            else
            {
                max_eth_cons[port] = MAX_CONNECTIONS_VF;
            }
            /* For the MCP-WA, we always assume the same bar size for all ports: makes life simpler... */
            max_bar_supported_cons[port] = pdev->hw_info.bar_size[BAR_1] / LM_DQ_CID_SIZE;
        }
        /* so after all this - what is the maximum number of connections supported for this port? */
        max_supported_cons[port] = log2_align(max_toe_cons[port] + max_rdma_cons[port] + max_iscsi_cons[port] + max_fcoe_cons[port] + max_eth_cons[port]);
        max_supported_cons[port] = min(max_supported_cons[port], max_bar_supported_cons[port]);

        /* And after all this... in lediag / ediag... we assume a maximum of 1024 connections */
#if defined(DOS) || defined(__LINUX)
        max_supported_cons[port] = min(max_supported_cons[port], (u32_t)1024);
#endif

        if (max_supported_cons[port] > pdev->hw_info.max_common_conns)
        {
            pdev->hw_info.max_common_conns = max_supported_cons[port];
        }


    }
    /* Now, port specific...
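     *
     * Worked example of the scaling done below (illustrative numbers, not from the
     * original source): if the licensed offload requests add up to
     * s = iscsi + rdma + toe + fcoe = 3000 connections but only t = 2000 CIDs remain
     * after reserving the ETH connections, every offload class is scaled by t/s,
     * e.g. 128 iSCSI connections become 128 * 2000 / 3000 = 85.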
*/ 1286 port = PORT_ID(pdev); 1287 /* now, there could be a problem where the bar limited us, and the max-connections is smaller than the total above, in this case we need to decrease the 1288 * numbers relatively... can't touch MAX_ETH_CONS... */ 1289 if (ERR_IF(max_supported_cons[port] < max_eth_cons[port])) 1290 { 1291 return LM_STATUS_INVALID_PARAMETER; 1292 } 1293 if ((max_iscsi_cons[port] + max_rdma_cons[port] + max_toe_cons[port] + max_fcoe_cons[port] + max_eth_cons[port]) > max_supported_cons[port]) 1294 { 1295 /* we first try giving iscsi + rdma what they asked for... */ 1296 if ((max_iscsi_cons[port] + max_rdma_cons[port] + max_fcoe_cons[port] + max_eth_cons[port]) > max_supported_cons[port]) 1297 { 1298 u32_t s = max_iscsi_cons[port] + max_rdma_cons[port] + max_toe_cons[port] + max_fcoe_cons[port]; /* eth out of the game... */ 1299 u32_t t = max_supported_cons[port] - pdev->params.max_eth_including_vfs_conns; /* what we want to reach... */ 1300 /* relatively decrease all... (x+y+z=s, actual = t: xt/s+yt/s+zt/s = t) */ 1301 max_iscsi_cons[port] *=t; 1302 max_iscsi_cons[port] /=s; 1303 max_rdma_cons[port] *=t; 1304 max_rdma_cons[port] /=s; 1305 max_toe_cons[port] *=t; 1306 max_toe_cons[port] /=s; 1307 max_fcoe_cons[port] *=t; 1308 max_fcoe_cons[port] /=s; 1309 } 1310 else 1311 { 1312 /* just give toe what's left... */ 1313 max_toe_cons[port] = max_supported_cons[port] - (max_iscsi_cons[port] + max_rdma_cons[port] + max_fcoe_cons[port] + max_eth_cons[port]); 1314 } 1315 } 1316 if (ERR_IF((max_iscsi_cons[port] + max_rdma_cons[port] + max_fcoe_cons[port] + max_toe_cons[port] + max_eth_cons[port]) > max_supported_cons[port])) 1317 { 1318 return LM_STATUS_INVALID_PARAMETER; 1319 } 1320 1321 /* Now lets save our port-specific variables. By this stage we have the maximum supported connections for our port. */ 1322 pdev->hw_info.max_port_toe_conn = max_toe_cons[port]; 1323 DbgMessage(pdev, INFORMi, "max_toe_conn from shmem %d\n",pdev->hw_info.max_port_toe_conn); 1324 /* RDMA */ 1325 pdev->hw_info.max_port_rdma_conn = max_rdma_cons[port]; 1326 DbgMessage(pdev, INFORMi, "max_rdma_conn from shmem %d\n",pdev->hw_info.max_port_rdma_conn); 1327 /* ISCSI */ 1328 pdev->hw_info.max_port_iscsi_conn = max_iscsi_cons[port]; 1329 DbgMessage(pdev, INFORMi, "max_iscsi_conn from shmem %d\n",pdev->hw_info.max_port_iscsi_conn); 1330 /* FCOE */ 1331 pdev->hw_info.max_port_fcoe_conn = max_fcoe_cons[port]; 1332 DbgMessage(pdev, INFORMi, "max_fcoe_conn from shmem %d\n",pdev->hw_info.max_port_fcoe_conn); 1333 1334 pdev->hw_info.max_port_conns = log2_align(pdev->hw_info.max_port_toe_conn + 1335 pdev->hw_info.max_port_rdma_conn + pdev->hw_info.max_port_iscsi_conn 1336 + pdev->hw_info.max_port_fcoe_conn + pdev->params.max_eth_including_vfs_conns); 1337 1338 if (ERR_IF(pdev->hw_info.max_port_conns > max_bar_supported_cons[port])) 1339 { 1340 /* this would mean an error in the calculations above. */ 1341 return LM_STATUS_INVALID_PARAMETER; 1342 } 1343 1344 return LM_STATUS_SUCCESS; 1345 } 1346 static lm_status_t lm_check_valid_mf_cfg(lm_device_t *pdev) 1347 { 1348 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 1349 lm_status_t lm_status = LM_STATUS_SUCCESS; 1350 const u8_t func_id = FUNC_ID(pdev); 1351 u8_t i = 0; 1352 u8_t j = 0; 1353 u32_t mf_cfg1 = 0; 1354 u32_t mf_cfg2 = 0; 1355 u32_t ovlan1 = 0; 1356 u32_t ovlan2 = 0; 1357 u32_t dynamic_cfg = 0; 1358 1359 /* hard coded offsets in vnic_cfg.tcl. if assertion here fails, 1360 * need to fix vnic_cfg.tcl script as well. 
 */
//  ASSERT_STATIC(OFFSETOF(shmem_region_t,mf_cfg) == 0x7e4);
    ASSERT_STATIC(OFFSETOF(mf_cfg_t,shared_mf_config.clp_mb) == 0);
    //ASSERT_STATIC(MCP_CLP_MB_NO_CLP == 0x80000000); not yet defined
    ASSERT_STATIC(OFFSETOF(mf_cfg_t,func_mf_config) == 36);
    ASSERT_STATIC(OFFSETOF(func_mf_cfg_t,config) == 0);
    ASSERT_STATIC(FUNC_MF_CFG_FUNC_HIDE == 0x1);
    ASSERT_STATIC(FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA == 0x4);
    ASSERT_STATIC(FUNC_MF_CFG_FUNC_DISABLED == 0x8);
    ASSERT_STATIC(OFFSETOF(func_mf_cfg_t,mac_upper) == 4);
    ASSERT_STATIC(OFFSETOF(func_mf_cfg_t,mac_lower) == 8);
    ASSERT_STATIC(FUNC_MF_CFG_UPPERMAC_DEFAULT == 0x0000ffff);
    ASSERT_STATIC(FUNC_MF_CFG_LOWERMAC_DEFAULT == 0xffffffff);
    ASSERT_STATIC(OFFSETOF(func_mf_cfg_t,e1hov_tag) == 12);
    ASSERT_STATIC(FUNC_MF_CFG_E1HOV_TAG_DEFAULT == 0x0000ffff);
    ASSERT_STATIC(sizeof(func_mf_cfg_t) == 24);

    /* trace mf cfg parameters */
    DbgMessage(pdev, INFORMi, "MF cfg parameters for function %d:\n", func_id);
    DbgMessage(pdev, INFORMi, "\t func_mf_cfg=0x%x\n\t multi_vnics_mode=%d\n\t vnics_per_port=%d\n\t ovlan/vifid=%d\n\t min_bw=%d\n\t max_bw=%d\n",
               mf_info->func_mf_cfg,
               mf_info->multi_vnics_mode,
               mf_info->vnics_per_port,
               mf_info->ext_id,
               mf_info->min_bw,
               mf_info->max_bw);
    DbgMessage(pdev, INFORMi, "\t mac addr (overriding main and iscsi): %02x %02x %02x %02x %02x %02x\n",
               pdev->hw_info.mac_addr[0],
               pdev->hw_info.mac_addr[1],
               pdev->hw_info.mac_addr[2],
               pdev->hw_info.mac_addr[3],
               pdev->hw_info.mac_addr[4],
               pdev->hw_info.mac_addr[5]);

    /* verify that the function is not hidden */
    if (GET_FLAGS(mf_info->func_mf_cfg, FUNC_MF_CFG_FUNC_HIDE))
    {
        DbgMessage(pdev, FATAL, "Enumerated function %d is marked as hidden\n", func_id);
        lm_status = LM_STATUS_FAILURE;
        goto _end;
    }

    if (mf_info->vnics_per_port > 1 && !mf_info->multi_vnics_mode)
    {
        DbgMessage(pdev, FATAL, "invalid mf mode configuration: vnics_per_port=%d, multi_vnics_mode=%d\n",
                   mf_info->vnics_per_port,
                   mf_info->multi_vnics_mode);
        lm_status = LM_STATUS_FAILURE;
        //DbgBreakIf(1);
        goto _end;
    }

    /* Sanity checks on outer-vlan for switch_dependent_mode...
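     *
     * Background note (an assumption for illustration, not from the original source):
     * an outer-vlan equal to FUNC_MF_CFG_E1HOV_TAG_DEFAULT (0xffff) is treated as
     * "not assigned"; absolute functions 0,2,4,6 share one port and 1,3,5,7 the other,
     * which is why the duplicate-ovlan loop below advances in steps of 2.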
*/ 1413 if (mf_info->mf_mode == MULTI_FUNCTION_SD) 1414 { 1415 /* enumerated vnic id > 0 must have valid ovlan if we're in switch-dependet mode */ 1416 if ((VNIC_ID(pdev) > 0) && !VALID_OVLAN(OVLAN(pdev))) 1417 { 1418 DbgMessage(pdev, WARNi, "invalid mf mode configuration: VNICID=%d, Function is enumerated, ovlan (%d) is invalid\n", 1419 VNIC_ID(pdev), OVLAN(pdev)); 1420 #ifdef EDIAG 1421 // Allow OVLAN 0xFFFF in ediag UFP mode 1422 if (mf_info->sd_mode != SD_UFP_MODE) 1423 { 1424 lm_status = LM_STATUS_FAILURE; 1425 } 1426 #else 1427 lm_status = LM_STATUS_FAILURE; 1428 #endif 1429 goto _end; 1430 } 1431 1432 /* additional sanity checks */ 1433 if (!VALID_OVLAN(OVLAN(pdev)) && mf_info->multi_vnics_mode) 1434 { 1435 DbgMessage(pdev, FATAL, "invalid mf mode configuration: multi_vnics_mode=%d, ovlan=%d\n", 1436 mf_info->multi_vnics_mode, 1437 OVLAN(pdev)); 1438 #ifdef EDIAG 1439 // Allow OVLAN 0xFFFF in ediag UFP mode 1440 if (mf_info->sd_mode != SD_UFP_MODE) 1441 { 1442 lm_status = LM_STATUS_FAILURE; 1443 } 1444 #else 1445 lm_status = LM_STATUS_FAILURE; 1446 #endif 1447 goto _end; 1448 } 1449 /* verify all functions are either mf mode or sf mode: 1450 * if we set mode to mf, make sure that all non hidden functions have valid ovlan 1451 * if we set mode to sf, make sure that all non hidden functions have invalid ovlan */ 1452 LM_FOREACH_ABS_FUNC_IN_PORT(pdev, i) 1453 { 1454 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[i].config),&mf_cfg1); 1455 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[i].e1hov_tag), &ovlan1); 1456 if (!GET_FLAGS(mf_cfg1, FUNC_MF_CFG_FUNC_HIDE) && 1457 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 1458 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) 1459 { 1460 #ifdef EDIAG 1461 // Allow OVLAN 0xFFFF in eDiag UFP mode 1462 if (mf_info->sd_mode != SD_UFP_MODE) 1463 { 1464 lm_status = LM_STATUS_FAILURE; 1465 } 1466 #else 1467 lm_status= LM_STATUS_FAILURE; 1468 #endif 1469 goto _end; 1470 } 1471 } 1472 /* verify different ovlan between funcs on same port */ 1473 LM_FOREACH_ABS_FUNC_IN_PORT(pdev, i) 1474 { 1475 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[i].config),&mf_cfg1); 1476 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[i].e1hov_tag), &ovlan1); 1477 /* iterate from the next function in the port till max func */ 1478 for (j = i + 2; j < E1H_FUNC_MAX; j += 2) 1479 { 1480 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[j].config),&mf_cfg2); 1481 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[j].e1hov_tag), &ovlan2); 1482 if (!GET_FLAGS(mf_cfg1, FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan1) && 1483 !GET_FLAGS(mf_cfg2, FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan2) && 1484 (ovlan1 == ovlan2) ) 1485 { 1486 lm_status = LM_STATUS_FAILURE; 1487 DbgBreakIf(1); 1488 goto _end; 1489 } 1490 } 1491 } 1492 // Check if DCC is active (Debugging only) 1493 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, port_mf_config[PATH_ID(pdev)][PORT_ID(pdev)].dynamic_cfg),&dynamic_cfg ); 1494 if( PORT_MF_CFG_E1HOV_TAG_DEFAULT == ( dynamic_cfg & PORT_MF_CFG_E1HOV_TAG_MASK ) ) 1495 { 1496 pdev->hw_info.is_dcc_active = FALSE; 1497 } 1498 else 1499 { 1500 pdev->hw_info.is_dcc_active = TRUE; 1501 } 1502 } // MULTI_FUNCTION_SD 1503 _end: 1504 return lm_status; 1505 } 1506 1507 void lm_cmng_get_shmem_info( lm_device_t* pdev ) 1508 { 1509 u32_t val = 0; 1510 u8_t i = 0; 1511 u8_t vnic = 0; 1512 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info;; 1513 1514 if( !IS_MF_MODE_CAPABLE(pdev) ) 1515 { 1516 DbgBreakIf(1) ; 1517 return; 1518 } 1519 1520 
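    /* Note (assumption, not from the original source): min_bw/max_bw read below are
     * per-vNIC bandwidth-allocation weights taken from each function's mf config on
     * this port; they are consumed later by the cmng / rate-shaping setup. */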
LM_FOREACH_ABS_FUNC_IN_PORT(pdev, i) 1521 { 1522 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[i].config),&val); 1523 /* get min/max bw */ 1524 mf_info->min_bw[vnic] = (GET_FLAGS(val, FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 1525 mf_info->max_bw[vnic] = (GET_FLAGS(val, FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 1526 vnic++; 1527 } 1528 } 1529 1530 /**lm_get_vnics_per_port 1531 * Get the value of vnics_per_port according to the MF mode and 1532 * port mode. 1533 * 1534 * Note: This function assumes that multi_vnics_mode and 1535 * chip_port_mode are initialized in hw_info. 1536 * 1537 * @param pdev 1538 * 1539 * @return u8_t the value of vnics_per_port for this pdev's port 1540 * mode and MF mode. This value does not consider hidden 1541 * PFs. 1542 */ 1543 static u8_t lm_get_vnics_per_port(lm_device_t* pdev) 1544 { 1545 if (pdev->hw_info.mf_info.multi_vnics_mode) 1546 { 1547 return LM_PFS_PER_PORT(pdev); 1548 } 1549 else 1550 { 1551 return 1; 1552 } 1553 } 1554 1555 /* Get shmem multi function config info for switch dependent mode */ 1556 static lm_status_t lm_get_shmem_mf_cfg_info_sd(lm_device_t *pdev) 1557 { 1558 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 1559 u32_t val = 0; 1560 1561 /* get ovlan if we're in switch-dependent mode... */ 1562 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].e1hov_tag),&val); 1563 mf_info->ext_id = (u16_t)val; 1564 1565 mf_info->multi_vnics_mode = 1; 1566 if(!VALID_OVLAN(OVLAN(pdev))) 1567 { 1568 /* Unexpected at this time */ 1569 DbgMessage(pdev, FATAL, "Invalid mf mode configuration: VNICID=%d, Function is enumerated, ovlan (%d) is invalid\n", 1570 VNIC_ID(pdev), OVLAN(pdev)); 1571 #ifdef EDIAG 1572 // Allow OVLAN 0xFFFF in ediag UFP mode 1573 if (mf_info->sd_mode != SD_UFP_MODE) 1574 { 1575 return LM_STATUS_FAILURE; 1576 } 1577 #else 1578 return LM_STATUS_FAILURE; 1579 #endif 1580 } 1581 1582 /* Get capabilities */ 1583 if (GET_FLAGS(mf_info->func_mf_cfg, FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_ISCSI) 1584 { 1585 pdev->params.mf_proto_support_flags |= LM_PROTO_SUPPORT_ISCSI; 1586 } 1587 else if (GET_FLAGS(mf_info->func_mf_cfg, FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_FCOE) 1588 { 1589 pdev->params.mf_proto_support_flags |= LM_PROTO_SUPPORT_FCOE; 1590 } 1591 else 1592 { 1593 pdev->params.mf_proto_support_flags |= LM_PROTO_SUPPORT_ETHERNET; 1594 } 1595 1596 mf_info->vnics_per_port = lm_get_vnics_per_port(pdev); 1597 1598 return LM_STATUS_SUCCESS; 1599 } 1600 1601 1602 /* Get shmem multi function config info for switch dependent mode */ 1603 static lm_status_t lm_get_shmem_mf_cfg_info_sd_bd(lm_device_t *pdev) 1604 { 1605 lm_status_t lm_status = lm_get_shmem_mf_cfg_info_sd(pdev); 1606 1607 return lm_status; 1608 } 1609 1610 1611 /* Get shmem multi function config info for switch dependent mode */ 1612 static lm_status_t lm_get_shmem_mf_cfg_info_sd_ufp(lm_device_t *pdev) 1613 { 1614 lm_status_t lm_status = lm_get_shmem_mf_cfg_info_sd(pdev); 1615 1616 return lm_status; 1617 } 1618 1619 static void _copy_mac_upper_lower_to_arr(IN u32_t mac_upper, IN u32_t mac_lower, OUT u8_t* mac_addr) 1620 { 1621 if(mac_addr) 1622 { 1623 mac_addr[0] = (u8_t) (mac_upper >> 8); 1624 mac_addr[1] = (u8_t) mac_upper; 1625 mac_addr[2] = (u8_t) (mac_lower >> 24); 1626 mac_addr[3] = (u8_t) (mac_lower >> 16); 1627 mac_addr[4] = (u8_t) (mac_lower >> 8); 1628 mac_addr[5] = (u8_t) mac_lower; 1629 } 1630 } 1631 1632 static void lm_get_shmem_ext_mac_addresses(lm_device_t *pdev) 1633 { 1634 u32_t 
mac_upper = 0; 1635 u32_t mac_lower = 0; 1636 u32_t offset = 0; 1637 const u8_t abs_func_id = ABS_FUNC_ID(pdev); 1638 1639 /* We have a different mac address per iscsi / fcoe - we'll set it from extended multi function info, but only if it's valid, otherwise 1640 * we'll leave the same mac as for L2 1641 */ 1642 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].iscsi_mac_addr_upper); 1643 LM_MFCFG_READ(pdev, offset, &mac_upper); 1644 1645 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].iscsi_mac_addr_lower); 1646 LM_MFCFG_READ(pdev, offset, &mac_lower); 1647 1648 _copy_mac_upper_lower_to_arr(mac_upper, mac_lower, pdev->hw_info.iscsi_mac_addr); 1649 1650 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].fcoe_mac_addr_upper); 1651 LM_MFCFG_READ(pdev, offset, &mac_upper); 1652 1653 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].fcoe_mac_addr_lower); 1654 LM_MFCFG_READ(pdev, offset, &mac_lower); 1655 1656 _copy_mac_upper_lower_to_arr(mac_upper, mac_lower, pdev->hw_info.fcoe_mac_addr); 1657 1658 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].fcoe_wwn_port_name_upper); 1659 LM_MFCFG_READ(pdev, offset, &mac_upper); 1660 1661 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].fcoe_wwn_port_name_lower); 1662 LM_MFCFG_READ(pdev, offset, &mac_lower); 1663 1664 _copy_mac_upper_lower_to_arr(mac_upper, mac_lower, &(pdev->hw_info.fcoe_wwn_port_name[2])); 1665 pdev->hw_info.fcoe_wwn_port_name[0] = (u8_t) (mac_upper >> 24); 1666 pdev->hw_info.fcoe_wwn_port_name[1] = (u8_t) (mac_upper >> 16); 1667 1668 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].fcoe_wwn_node_name_upper); 1669 LM_MFCFG_READ(pdev, offset, &mac_upper); 1670 1671 offset = OFFSETOF(mf_cfg_t, func_ext_config[abs_func_id].fcoe_wwn_node_name_lower); 1672 LM_MFCFG_READ(pdev, offset, &mac_lower); 1673 1674 _copy_mac_upper_lower_to_arr(mac_upper, mac_lower, &(pdev->hw_info.fcoe_wwn_node_name[2])); 1675 pdev->hw_info.fcoe_wwn_node_name[0] = (u8_t) (mac_upper >> 24); 1676 pdev->hw_info.fcoe_wwn_node_name[1] = (u8_t) (mac_upper >> 16); 1677 } 1678 1679 static u32_t 1680 lm_get_shmem_ext_proto_support_flags(lm_device_t *pdev) 1681 { 1682 u32_t func_ext_cfg = 0; 1683 u32_t proto_support_flags = 0; 1684 1685 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_ext_config[ABS_FUNC_ID(pdev)].func_cfg),&func_ext_cfg); 1686 1687 if (GET_FLAGS(func_ext_cfg, MACP_FUNC_CFG_FLAGS_ENABLED )) 1688 { 1689 if (GET_FLAGS(func_ext_cfg, MACP_FUNC_CFG_FLAGS_ETHERNET)) 1690 { 1691 proto_support_flags |= LM_PROTO_SUPPORT_ETHERNET; 1692 } 1693 if (GET_FLAGS(func_ext_cfg, MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)) 1694 { 1695 proto_support_flags |= LM_PROTO_SUPPORT_ISCSI; 1696 } 1697 if (GET_FLAGS(func_ext_cfg, MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) 1698 { 1699 proto_support_flags |= LM_PROTO_SUPPORT_FCOE; 1700 } 1701 } 1702 1703 return proto_support_flags; 1704 } 1705 1706 /* Get shmem multi function config info for switch independent mode */ 1707 static lm_status_t lm_get_shmem_mf_cfg_info_si(lm_device_t *pdev) 1708 { 1709 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 1710 u32_t val = 0; 1711 1712 /* No outer-vlan... 
we're in switch-independent mode, so if the mac is valid - assume multi-function */ 1713 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_ext_config[ABS_FUNC_ID(pdev)].func_cfg),&val); 1714 val = val & MACP_FUNC_CFG_FLAGS_MASK; 1715 mf_info->multi_vnics_mode = (val != 0); 1716 mf_info->path_has_ovlan = FALSE; 1717 1718 pdev->params.mf_proto_support_flags = lm_get_shmem_ext_proto_support_flags(pdev); 1719 1720 mf_info->vnics_per_port = lm_get_vnics_per_port(pdev); 1721 1722 return LM_STATUS_SUCCESS; 1723 1724 } 1725 1726 lm_status_t lm_get_shmem_mf_cfg_info_niv(lm_device_t *pdev) 1727 { 1728 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 1729 u32_t func_config = 0; 1730 u32_t niv_config = 0; 1731 u32_t e1hov_tag = 0; 1732 1733 mf_info->multi_vnics_mode = TRUE; 1734 1735 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].e1hov_tag),&e1hov_tag); 1736 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].config), &func_config); 1737 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].afex_config), &niv_config); 1738 1739 mf_info->ext_id = (u16_t)(GET_FLAGS(e1hov_tag, FUNC_MF_CFG_E1HOV_TAG_MASK)>>FUNC_MF_CFG_E1HOV_TAG_SHIFT); 1740 mf_info->default_vlan = (u16_t)(GET_FLAGS(e1hov_tag, FUNC_MF_CFG_AFEX_VLAN_MASK)>>FUNC_MF_CFG_AFEX_VLAN_SHIFT); 1741 1742 mf_info->niv_allowed_priorities = (u8_t)(GET_FLAGS(niv_config, FUNC_MF_CFG_AFEX_COS_FILTER_MASK)>>FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 1743 mf_info->niv_default_cos = (u8_t)(GET_FLAGS(func_config, FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK)>>FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 1744 mf_info->afex_vlan_mode = GET_FLAGS(niv_config, FUNC_MF_CFG_AFEX_VLAN_MODE_MASK)>>FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; 1745 mf_info->niv_mba_enabled = GET_FLAGS(niv_config, FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK)>>FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT; 1746 1747 1748 pdev->params.mf_proto_support_flags = lm_get_shmem_ext_proto_support_flags(pdev); 1749 1750 mf_info->vnics_per_port = lm_get_vnics_per_port(pdev); 1751 1752 return LM_STATUS_SUCCESS; 1753 } 1754 1755 static lm_status_t lm_shmem_set_default(lm_device_t *pdev) 1756 { 1757 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 1758 u8_t i; 1759 1760 /* set defaults: */ 1761 mf_info->multi_vnics_mode = 0; 1762 mf_info->vnics_per_port = 1; 1763 mf_info->ext_id = 0xffff; /* invalid ovlan */ /* TBD - E1H: - what is the right value for Cisco? 
*/ 1764 1765 ASSERT_STATIC( ARRSIZE(mf_info->min_bw) == ARRSIZE(mf_info->max_bw) ) 1766 1767 for (i = 0; i < ARRSIZE(mf_info->min_bw); i++) 1768 { 1769 mf_info->min_bw[i] = 0; 1770 mf_info->max_bw[i] = 200; 1771 } 1772 pdev->hw_info.shmem_base = 0; 1773 pdev->hw_info.max_port_toe_conn = MAX_CONNECTIONS_TOE; 1774 pdev->hw_info.max_port_rdma_conn = MAX_CONNECTIONS_RDMA; 1775 pdev->hw_info.max_port_iscsi_conn = MAX_CONNECTIONS_ISCSI; 1776 pdev->hw_info.max_port_fcoe_conn = MAX_CONNECTIONS_FCOE; 1777 pdev->hw_info.max_port_conns = MAX_CONNECTIONS; 1778 pdev->hw_info.max_common_conns = MAX_CONNECTIONS; 1779 1780 return LM_STATUS_SUCCESS; 1781 } 1782 1783 static u32_t lm_get_shmem_base_addr(lm_device_t *pdev) 1784 { 1785 u32_t val = 0; 1786 u32_t min_shmem_addr = 0; 1787 u32_t max_shmem_addr = 0; 1788 1789 val = REG_RD(pdev,MISC_REG_SHARED_MEM_ADDR); 1790 if (CHIP_IS_E1(pdev)) 1791 { 1792 min_shmem_addr = 0xa0000; 1793 max_shmem_addr = 0xb0000; 1794 } 1795 else if (CHIP_IS_E1H(pdev)) 1796 { 1797 min_shmem_addr = 0xa0000; 1798 max_shmem_addr = 0xc0000; 1799 } 1800 else if (CHIP_IS_E2E3(pdev)) 1801 { 1802 min_shmem_addr = 0x3a0000; 1803 max_shmem_addr = 0x3c8000; 1804 } 1805 else 1806 { 1807 u32 pcicfg_chip; 1808 mm_read_pci(pdev, 0, &pcicfg_chip); 1809 DbgMessage(pdev, FATAL , "Unknown chip 0x%x, pcicfg[0]=0x%x, GRC[0x2000]=0x%x\n", 1810 CHIP_NUM(pdev), pcicfg_chip, REG_RD(pdev, 0x2000)); 1811 DbgBreakMsg("Unknown chip version"); 1812 } 1813 1814 if (val < min_shmem_addr || val >= max_shmem_addr) 1815 { 1816 /* Invalid shmem base address return '0' */ 1817 val = 0; 1818 } 1819 1820 return val; 1821 } 1822 1823 /** 1824 * @Description 1825 * This function is called when MCP is not detected. It 1826 * initializes lmdevice parameters that are required for 1827 * functional running with default values or values read 1828 * from vnic_cfg.tcl script. 1829 * 1830 * @param pdev 1831 * 1832 * @return lm_status_t 1833 */ 1834 static lm_status_t lm_get_shmem_info_no_mcp_bypass(lm_device_t *pdev) 1835 { 1836 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 1837 lm_status_t lm_status = LM_STATUS_SUCCESS; 1838 u32_t val = 0; 1839 1840 1841 DbgMessage(pdev, WARN, "MCP Down Detected\n"); 1842 #ifndef _VBD_CMD_ 1843 val = REG_RD(pdev,MISC_REG_SHARED_MEM_ADDR); 1844 DbgMessage(pdev, FATAL, "FW ShMem addr: 0x%x\n", val); 1845 #endif // _VBD_CMD_ 1846 1847 pdev->hw_info.mcp_detected = 0; 1848 /* should have a magic number written if configuration was set otherwise, use default above */ 1849 LM_SHMEM_READ(pdev, NO_MCP_WA_CFG_SET_ADDR, &val); 1850 if (val == NO_MCP_WA_CFG_SET_MAGIC) 1851 { 1852 LM_SHMEM_READ(pdev, NO_MCP_WA_FORCE_5710, &val); 1853 LM_SHMEM_READ(pdev, NO_MCP_WA_MULTI_VNIC_MODE, &val); 1854 mf_info->multi_vnics_mode = (u8_t)val; 1855 if (mf_info->multi_vnics_mode) 1856 { 1857 LM_SHMEM_READ(pdev, NO_MCP_WA_OVLAN(ABS_FUNC_ID(pdev)), &val); 1858 mf_info->ext_id = (u16_t)val; 1859 1860 mf_info->multi_vnics_mode = VALID_OVLAN(mf_info->ext_id)? 1 : 0; 1861 mf_info->path_has_ovlan = mf_info->multi_vnics_mode; 1862 1863 /* decide on path multi vnics mode - incase we're not in mf mode...and in 4-port-mode good enough to check vnic-0 of the other port, on the same path */ 1864 if ((CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) && !mf_info->multi_vnics_mode) 1865 { 1866 u8_t other_port = !PORT_ID(pdev); 1867 u8_t abs_func_on_other_port = PATH_ID(pdev) + 2*other_port; 1868 LM_SHMEM_READ(pdev, NO_MCP_WA_OVLAN(abs_func_on_other_port), &val); 1869 1870 mf_info->path_has_ovlan = VALID_OVLAN((u16_t)val) ? 
1 : 0; 1871 } 1872 1873 /* For simplicity, we leave vnics_per_port to be 2, for resource splitting issues... */ 1874 if (mf_info->path_has_ovlan) 1875 { 1876 if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) 1877 { 1878 mf_info->vnics_per_port = 2; 1879 } 1880 else 1881 { 1882 mf_info->vnics_per_port = 4; 1883 } 1884 } 1885 1886 /* If we're multi-vnic, we'll set a default mf_mode of switch-dependent, this could be overriden 1887 * later on by registry */ 1888 mf_info->mf_mode = MULTI_FUNCTION_SD; 1889 1890 } 1891 lm_status = lm_get_shmem_license_info(pdev); 1892 if (lm_status != LM_STATUS_SUCCESS) 1893 { 1894 return lm_status; 1895 } 1896 } 1897 /* sanity checks on vnic params */ 1898 if (mf_info->multi_vnics_mode) 1899 { 1900 if (!VALID_OVLAN(mf_info->ext_id)) 1901 { 1902 DbgMessage(pdev, FATAL, "Invalid ovlan (0x%x) configured for Func %d. Can't load the function.\n", 1903 mf_info->ext_id, ABS_FUNC_ID(pdev)); 1904 lm_status = LM_STATUS_FAILURE; 1905 } 1906 } 1907 if ((mf_info->vnics_per_port - 1 < VNIC_ID(pdev)) || ( !mf_info->multi_vnics_mode && (VNIC_ID(pdev) > 0))) 1908 { 1909 DbgMessage(pdev, FATAL, "Invalid vnics_per_port (%d) configured for Func %d. Can't load the function.\n", 1910 mf_info->vnics_per_port, ABS_FUNC_ID(pdev)); 1911 lm_status = LM_STATUS_FAILURE; 1912 } 1913 return lm_status; 1914 } 1915 1916 1917 1918 static lm_status_t lm_get_shmem_shared_hw_config(lm_device_t *pdev) 1919 { 1920 u32_t val = 0; 1921 u8_t i = 0; 1922 1923 /* Get the hw config words. */ 1924 LM_SHMEM_READ(pdev, OFFSETOF(shmem_region_t, dev_info.shared_hw_config.config),&val); 1925 pdev->hw_info.nvm_hw_config = val; 1926 pdev->params.link.hw_led_mode = ((pdev->hw_info.nvm_hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT); 1927 DbgMessage(pdev, INFORMi, "nvm_hw_config %d\n",val); 1928 1929 LM_SHMEM_READ(pdev, 1930 OFFSETOF(shmem_region_t, dev_info.shared_hw_config.config2),&val); 1931 pdev->hw_info.nvm_hw_config2 = val; 1932 DbgMessage(pdev, INFORMi, "nvm_hw_configs %d\n",val); 1933 1934 //board_sn; 1935 LM_SHMEM_READ(pdev, 1936 OFFSETOF(shmem_region_t, dev_info.shared_hw_config.part_num),&val); 1937 pdev->hw_info.board_num[0] = (u8_t) val; 1938 pdev->hw_info.board_num[1] = (u8_t) (val >> 8); 1939 pdev->hw_info.board_num[2] = (u8_t) (val >> 16); 1940 pdev->hw_info.board_num[3] = (u8_t) (val >> 24); 1941 1942 LM_SHMEM_READ(pdev, 1943 OFFSETOF(shmem_region_t, dev_info.shared_hw_config.part_num)+4,&val); 1944 pdev->hw_info.board_num[4] = (u8_t) val; 1945 pdev->hw_info.board_num[5] = (u8_t) (val >> 8); 1946 pdev->hw_info.board_num[6] = (u8_t) (val >> 16); 1947 pdev->hw_info.board_num[7] = (u8_t) (val >> 24); 1948 1949 LM_SHMEM_READ(pdev, 1950 OFFSETOF(shmem_region_t, dev_info.shared_hw_config.part_num)+8,&val); 1951 pdev->hw_info.board_num[8] = (u8_t) val; 1952 pdev->hw_info.board_num[9] = (u8_t) (val >> 8); 1953 pdev->hw_info.board_num[10] =(u8_t) (val >> 16); 1954 pdev->hw_info.board_num[11] =(u8_t) (val >> 24); 1955 1956 LM_SHMEM_READ(pdev, 1957 OFFSETOF(shmem_region_t, dev_info.shared_hw_config.part_num)+12,&val); 1958 pdev->hw_info.board_num[12] = (u8_t) val; 1959 pdev->hw_info.board_num[13] = (u8_t) (val >> 8); 1960 pdev->hw_info.board_num[14] = (u8_t) (val >> 16); 1961 pdev->hw_info.board_num[15] = (u8_t) (val >> 24); 1962 DbgMessage(pdev, INFORMi, "board_sn: "); 1963 for (i = 0 ; i < 16 ; i++ ) 1964 { 1965 DbgMessage(pdev, INFORMi, "%02x",pdev->hw_info.board_num[i]); 1966 } 1967 DbgMessage(pdev, INFORMi, "\n"); 1968 1969 /* Get the override preemphasis flag */ 1970 
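    /* The read below mirrors the shared-feature "override preemphasis"
     * setting into params.link.feature_config_flags so the elink code can
     * pick it up. */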
LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.shared_feature_config.config),&val); 1971 if GET_FLAGS(val, SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 1972 { 1973 SET_FLAGS( pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED); 1974 } 1975 else 1976 { 1977 RESET_FLAGS(pdev->params.link.feature_config_flags,ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED); 1978 } 1979 #ifdef EDIAG 1980 /* Diag doesn't support remote fault detection */ 1981 SET_FLAGS( pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET); 1982 /* Only Diag supports IEEE PHY testing */ 1983 SET_FLAGS( pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_IEEE_PHY_TEST); 1984 #endif 1985 return LM_STATUS_SUCCESS; 1986 } 1987 1988 static u32_t lm_get_shmem_mf_cfg_base(lm_device_t *pdev) 1989 { 1990 u32_t shmem2_size; 1991 u32_t offset; 1992 u32_t mf_cfg_offset_value; 1993 1994 offset = pdev->hw_info.shmem_base + OFFSETOF(shmem_region_t, func_mb) + E1H_FUNC_MAX * sizeof(struct drv_func_mb); 1995 if (pdev->hw_info.shmem_base2 != 0) 1996 { 1997 LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t,size), &shmem2_size); 1998 if (shmem2_size > OFFSETOF(shmem2_region_t,mf_cfg_addr)) 1999 { 2000 LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t,mf_cfg_addr), &mf_cfg_offset_value); 2001 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) 2002 { 2003 offset = mf_cfg_offset_value; 2004 } 2005 } 2006 } 2007 return offset; 2008 } 2009 2010 static lm_status_t lm_get_shmem_port_hw_config(lm_device_t *pdev) 2011 { 2012 u32_t val; 2013 const u8_t port = PORT_ID(pdev); 2014 2015 /* mba features*/ 2016 LM_SHMEM_READ(pdev, 2017 OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].mba_config), 2018 &val); 2019 pdev->hw_info.mba_features = (val & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK); 2020 DbgMessage(pdev, INFORMi, "mba_features %d\n",pdev->hw_info.mba_features); 2021 /* mba_vlan_cfg */ 2022 LM_SHMEM_READ(pdev, 2023 OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].mba_vlan_cfg), 2024 &val); 2025 pdev->hw_info.mba_vlan_cfg = val ; 2026 DbgMessage(pdev, INFORMi, "mba_vlan_cfg 0x%x\n",pdev->hw_info.mba_vlan_cfg); 2027 2028 // port_feature_config bits 2029 LM_SHMEM_READ(pdev, 2030 OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].config), 2031 &val); 2032 pdev->hw_info.port_feature_config = val; 2033 DbgMessage(pdev, INFORMi, "port_feature_config 0x%x\n",pdev->hw_info.port_feature_config); 2034 2035 #ifndef DOS 2036 /* AutogrEEEn settings */ 2037 if(val & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 2038 SET_FLAGS( pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED); 2039 } else { 2040 RESET_FLAGS( pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED); 2041 } 2042 #endif 2043 /* clc params*/ 2044 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_hw_config[port].speed_capability_mask),&val); 2045 pdev->params.link.speed_cap_mask[0] = val & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 2046 DbgMessage(pdev, INFORMi, "speed_cap_mask1 %d\n",val); 2047 2048 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_hw_config[port].speed_capability_mask2),&val); 2049 pdev->params.link.speed_cap_mask[1] = val & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 2050 DbgMessage(pdev, INFORMi, "speed_cap_mask2 %d\n",val); 2051 2052 /* Get lane swap*/ 2053 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_hw_config[port].lane_config),&val); 2054 pdev->params.link.lane_config = val; 2055 DbgMessage(pdev, INFORMi, 
"lane_config %d\n",val); 2056 2057 /*link config */ 2058 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].link_config),&val); 2059 pdev->hw_info.link_config[ELINK_INT_PHY] = val; 2060 pdev->params.link.switch_cfg = val & PORT_FEATURE_CONNECTED_SWITCH_MASK; 2061 DbgMessage(pdev, INFORMi, "link config %d\n",val); 2062 2063 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].link_config2),&val); 2064 pdev->hw_info.link_config[ELINK_EXT_PHY1] = val; 2065 2066 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_hw_config[port].multi_phy_config),&val); 2067 /* set the initial value to the link params */ 2068 pdev->params.link.multi_phy_config = val; 2069 /* save the initial value if we'll want to restore it later */ 2070 pdev->hw_info.multi_phy_config = val; 2071 /* check if 10g KR is blocked on this session */ 2072 pdev->hw_info.no_10g_kr = FALSE ; 2073 2074 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_hw_config[port].default_cfg),&val); 2075 pdev->hw_info.phy_force_kr_enabler = (val & PORT_HW_CFG_FORCE_KR_ENABLER_MASK) ; 2076 2077 /* If the force KR enabler is on, 10G/20G should have been enabled in the 2078 * nvram as well. If 10G/20G capbility is not set, it means that the MFW 2079 * disabled it and we should set the no_10g_kr flag */ 2080 if(( PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED != pdev->hw_info.phy_force_kr_enabler ) && 2081 ( FALSE == ( pdev->params.link.speed_cap_mask[0] & (PORT_HW_CFG_SPEED_CAPABILITY_D0_10G | PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))) ) 2082 { 2083 pdev->hw_info.no_10g_kr = TRUE ; 2084 } 2085 2086 /* read EEE mode from shmem (original source is NVRAM) */ 2087 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].eee_power_mode),&val); 2088 pdev->params.link.eee_mode = val & PORT_FEAT_CFG_EEE_POWER_MODE_MASK; 2089 DbgMessage(pdev, INFORMi, "eee_power_mode 0x%x\n", pdev->params.link.eee_mode); 2090 2091 if ((pdev->params.link.eee_mode & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) 2092 { 2093 SET_FLAGS(pdev->params.link.eee_mode, 2094 ELINK_EEE_MODE_ENABLE_LPI | 2095 ELINK_EEE_MODE_ADV_LPI); 2096 } 2097 2098 return LM_STATUS_SUCCESS; 2099 } 2100 2101 /* Check if other path is in multi_function_mode */ 2102 static void lm_set_path_has_ovlan(lm_device_t *pdev) 2103 { 2104 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 2105 u32_t val = 0; 2106 2107 mf_info->path_has_ovlan = FALSE; 2108 2109 if (mf_info->mf_mode == MULTI_FUNCTION_SD) 2110 { 2111 mf_info->path_has_ovlan = TRUE; 2112 } 2113 else if (mf_info->mf_mode == SINGLE_FUNCTION) 2114 { 2115 /* decide on path multi vnics mode - incase we're not in mf mode...and in 4-port-mode good enough to check vnic-0 of the other port, on the same path */ 2116 if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) 2117 { 2118 u8_t other_port = !PORT_ID(pdev); 2119 u8_t abs_func_on_other_port = PATH_ID(pdev) + 2*other_port; 2120 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[abs_func_on_other_port].e1hov_tag),&val); 2121 2122 mf_info->path_has_ovlan = VALID_OVLAN((u16_t)val) ? 
1 : 0; 2123 } 2124 } 2125 } 2126 2127 /** 2128 * @Description 2129 * Initializes mf mode and data, checks that mf info is valid 2130 * by checking that MAC address must be legal (check only upper 2131 * bytes) for Switch-Independent mode; 2132 * OVLAN must be legal for Switch-Dependent mode 2133 * 2134 * @param pdev 2135 * 2136 * @return lm_status_t 2137 */ 2138 lm_status_t lm_get_shmem_mf_cfg_info(lm_device_t *pdev) 2139 { 2140 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 2141 u32_t val = 0; 2142 u32_t val2 = 0; 2143 u32_t mac_upper = 0; 2144 lm_status_t status = LM_STATUS_SUCCESS; 2145 2146 /* Set some mf_info defaults */ 2147 mf_info->vnics_per_port = 1; 2148 mf_info->multi_vnics_mode = FALSE; 2149 mf_info->path_has_ovlan = FALSE; 2150 mf_info->mf_mode = SINGLE_FUNCTION; 2151 pdev->params.mf_proto_support_flags = 0; 2152 2153 2154 /* Get the multi-function-mode value (switch dependent / independent / single-function ) */ 2155 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.shared_feature_config.config),&val); 2156 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 2157 2158 switch (val) 2159 { 2160 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 2161 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].mac_upper),&mac_upper); 2162 /* check for legal mac (upper bytes)*/ 2163 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) 2164 { 2165 mf_info->mf_mode = MULTI_FUNCTION_SI; 2166 } 2167 else 2168 { 2169 DbgMessage(pdev, WARNi, "Illegal configuration for switch independent mode\n"); 2170 } 2171 DbgBreakIf(CHIP_IS_E1x(pdev)); 2172 break; 2173 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 2174 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 2175 /* get OV configuration */ 2176 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].e1hov_tag),&val); 2177 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 2178 2179 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 2180 { 2181 mf_info->mf_mode = MULTI_FUNCTION_SD; 2182 mf_info->sd_mode = SD_REGULAR_MODE; 2183 } 2184 else 2185 { 2186 DbgMessage(pdev, WARNi, "Illegal configuration for switch dependent mode\n"); 2187 } 2188 break; 2189 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 2190 /* We're not in multi-function mode - return with vnics_per_port=1 & multi_vnics_mode = FALSE*/ 2191 return LM_STATUS_SUCCESS; 2192 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 2193 /* mark mf mode as NIV if MCP version includes NPAR-SD support 2194 and the MAC address is valid. 
2195 */ 2196 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].mac_upper),&mac_upper); 2197 if ((LM_SHMEM2_HAS(pdev, afex_driver_support)) && 2198 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) ) 2199 { 2200 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 2201 } 2202 else 2203 { 2204 DbgMessage(pdev, WARNi, "Illegal configuration for NPAR-SD mode\n"); 2205 } 2206 DbgBreakIf(CHIP_IS_E1x(pdev)); 2207 break; 2208 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE: 2209 mf_info->mf_mode = MULTI_FUNCTION_SD; 2210 mf_info->sd_mode = SD_BD_MODE; 2211 DbgMessage(pdev, WARN, "lm_get_shmem_info: SF_MODE_BD_MODE is detected.\n"); 2212 break; 2213 2214 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: 2215 mf_info->mf_mode = MULTI_FUNCTION_SD; 2216 mf_info->sd_mode = SD_UFP_MODE; 2217 DbgMessage(pdev, WARN, "lm_get_shmem_info: SF_MODE_UFP_MODE is detected.\n"); 2218 break; 2219 2220 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE: 2221 /* Get extended mf mode value */ 2222 LM_SHMEM_READ(pdev, OFFSETOF(shmem_region_t, dev_info.shared_hw_config.config_3),&val); 2223 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK; 2224 switch (val2) 2225 { 2226 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5: 2227 mf_info->mf_mode = MULTI_FUNCTION_SI; 2228 break; 2229 2230 default: 2231 DbgBreakMsg(" Unknown extended mf mode\n"); 2232 return LM_STATUS_FAILURE; 2233 } 2234 break; 2235 2236 default: 2237 DbgBreakMsg(" Unknown mf mode\n"); 2238 return LM_STATUS_FAILURE; 2239 } 2240 2241 /* Set path mf_mode (which could be different than function mf_mode) */ 2242 lm_set_path_has_ovlan(pdev); 2243 2244 /* Invalid Multi function configuration: */ 2245 if (mf_info->mf_mode == SINGLE_FUNCTION) 2246 { 2247 if (VNIC_ID(pdev) >= 1) 2248 { 2249 return LM_STATUS_FAILURE; 2250 } 2251 return LM_STATUS_SUCCESS; 2252 } 2253 2254 /* Get the multi-function configuration */ 2255 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].config),&val); 2256 mf_info->func_mf_cfg = val; 2257 2258 switch(mf_info->mf_mode) 2259 { 2260 case MULTI_FUNCTION_SD: 2261 { 2262 switch (mf_info->sd_mode) 2263 { 2264 case SD_REGULAR_MODE: 2265 status = lm_get_shmem_mf_cfg_info_sd(pdev); 2266 break; 2267 case SD_UFP_MODE: 2268 status = lm_get_shmem_mf_cfg_info_sd_ufp(pdev); 2269 break; 2270 case SD_BD_MODE: 2271 status = lm_get_shmem_mf_cfg_info_sd_bd(pdev); 2272 break; 2273 default: 2274 DbgBreak(); 2275 } 2276 2277 if(status != LM_STATUS_SUCCESS) 2278 return status; 2279 } 2280 break; 2281 case MULTI_FUNCTION_SI: 2282 { 2283 lm_get_shmem_mf_cfg_info_si(pdev); 2284 } 2285 break; 2286 case MULTI_FUNCTION_AFEX: 2287 { 2288 lm_get_shmem_mf_cfg_info_niv(pdev); 2289 } 2290 break; 2291 default: 2292 { 2293 DbgBreakIfAll(TRUE); 2294 return LM_STATUS_FAILURE; 2295 } 2296 } 2297 2298 lm_cmng_get_shmem_info(pdev); 2299 2300 return lm_check_valid_mf_cfg(pdev); 2301 } 2302 2303 static void lm_fcoe_set_default_wwns(lm_device_t *pdev) 2304 { 2305 /* create default wwns from fcoe mac adress */ 2306 mm_memcpy(&(pdev->hw_info.fcoe_wwn_port_name[2]), pdev->hw_info.fcoe_mac_addr, 6); 2307 pdev->hw_info.fcoe_wwn_port_name[0] = 0x20; 2308 pdev->hw_info.fcoe_wwn_port_name[1] = 0; 2309 mm_memcpy(&(pdev->hw_info.fcoe_wwn_node_name[2]), pdev->hw_info.fcoe_mac_addr, 6); 2310 pdev->hw_info.fcoe_wwn_node_name[0] = 0x10; 2311 pdev->hw_info.fcoe_wwn_node_name[1] = 0; 2312 } 2313 2314 static lm_status_t lm_get_shmem_mf_mac_info(lm_device_t *pdev) 2315 { 2316 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info; 2317 u32_t mac_upper = 0; 2318 u32_t mac_lower = 0; 2319 2320 
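    /* Multi-function only: the L2 MAC for this function comes from mf_cfg
     * (mac_upper/mac_lower) rather than from port_hw_config, which is the
     * source used on the single-function path. */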
if (mf_info->mf_mode == SINGLE_FUNCTION) 2321 { 2322 return LM_STATUS_FAILURE; 2323 } 2324 2325 /* Get the permanent L2 MAC address. */ 2326 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].mac_upper),&mac_upper); 2327 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].mac_lower),&mac_lower); 2328 2329 2330 /* Mac validity is assumed since we already checked it to determine mf_mode. And we assume mf_mode 2331 * is configured correctly when we enter this function. */ 2332 SET_FLAGS(mf_info->flags,MF_INFO_VALID_MAC); 2333 _copy_mac_upper_lower_to_arr(mac_upper, mac_lower, pdev->hw_info.mac_addr); 2334 2335 /* Set iSCSI / FCOE Mac addresses */ 2336 switch (mf_info->mf_mode) 2337 { 2338 case MULTI_FUNCTION_SD: 2339 { 2340 // in E1x the ext mac doesn't exists and will cause MCP parity error CQ67469 2341 if ( CHIP_IS_E1x(pdev) || IS_SD_UFP_MODE(pdev) || IS_SD_BD_MODE(pdev)) 2342 { 2343 /* Set all iscsi and fcoe mac addresses the same as network. */ 2344 mm_memcpy(pdev->hw_info.iscsi_mac_addr, pdev->hw_info.mac_addr, 6); 2345 mm_memcpy(pdev->hw_info.fcoe_mac_addr, pdev->hw_info.mac_addr, 6); 2346 break; 2347 } 2348 } 2349 case MULTI_FUNCTION_SI: 2350 case MULTI_FUNCTION_AFEX: 2351 lm_get_shmem_ext_mac_addresses(pdev); 2352 break; 2353 } 2354 2355 return LM_STATUS_SUCCESS; 2356 } 2357 2358 static lm_status_t lm_get_shmem_sf_mac_info(lm_device_t *pdev) 2359 { 2360 u32_t val = 0; 2361 u32_t val2 = 0; 2362 2363 LM_SHMEM_READ(pdev, 2364 OFFSETOF(shmem_region_t, dev_info.port_hw_config[PORT_ID(pdev)].mac_upper),&val); 2365 LM_SHMEM_READ(pdev, 2366 OFFSETOF(shmem_region_t, dev_info.port_hw_config[PORT_ID(pdev)].mac_lower),&val2); 2367 _copy_mac_upper_lower_to_arr(val, val2, pdev->hw_info.mac_addr); 2368 2369 /* Get iSCSI MAC address. */ 2370 LM_SHMEM_READ(pdev, 2371 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].iscsi_mac_upper),&val); 2372 LM_SHMEM_READ(pdev, 2373 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].iscsi_mac_lower),&val2); 2374 _copy_mac_upper_lower_to_arr(val, val2, pdev->hw_info.iscsi_mac_addr); 2375 2376 /* Get FCoE MAC addresses. 
*/ 2377 LM_SHMEM_READ(pdev, 2378 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].fcoe_fip_mac_upper),&val); 2379 LM_SHMEM_READ(pdev, 2380 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].fcoe_fip_mac_lower),&val2); 2381 _copy_mac_upper_lower_to_arr(val, val2, pdev->hw_info.fcoe_mac_addr); 2382 2383 LM_SHMEM_READ(pdev, 2384 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].fcoe_wwn_port_name_upper),&val); 2385 LM_SHMEM_READ(pdev, 2386 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].fcoe_wwn_port_name_lower),&val2); 2387 _copy_mac_upper_lower_to_arr(val, val2, &(pdev->hw_info.fcoe_wwn_port_name[2])); 2388 pdev->hw_info.fcoe_wwn_port_name[0] = (u8_t) (val >> 24); 2389 pdev->hw_info.fcoe_wwn_port_name[1] = (u8_t) (val >> 16); 2390 2391 LM_SHMEM_READ(pdev, 2392 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].fcoe_wwn_node_name_upper),&val); 2393 LM_SHMEM_READ(pdev, 2394 OFFSETOF(shmem_region_t,dev_info.port_hw_config[PORT_ID(pdev)].fcoe_wwn_node_name_lower),&val2); 2395 _copy_mac_upper_lower_to_arr(val, val2, &(pdev->hw_info.fcoe_wwn_node_name[2])); 2396 pdev->hw_info.fcoe_wwn_node_name[0] = (u8_t) (val >> 24); 2397 pdev->hw_info.fcoe_wwn_node_name[1] = (u8_t) (val >> 16); 2398 2399 DbgMessage(pdev, INFORMi, "main mac addr: %02x %02x %02x %02x %02x %02x\n", 2400 pdev->hw_info.mac_addr[0], 2401 pdev->hw_info.mac_addr[1], 2402 pdev->hw_info.mac_addr[2], 2403 pdev->hw_info.mac_addr[3], 2404 pdev->hw_info.mac_addr[4], 2405 pdev->hw_info.mac_addr[5]); 2406 DbgMessage(pdev, INFORMi, "iSCSI mac addr: %02x %02x %02x %02x %02x %02x\n", 2407 pdev->hw_info.iscsi_mac_addr[0], 2408 pdev->hw_info.iscsi_mac_addr[1], 2409 pdev->hw_info.iscsi_mac_addr[2], 2410 pdev->hw_info.iscsi_mac_addr[3], 2411 pdev->hw_info.iscsi_mac_addr[4], 2412 pdev->hw_info.iscsi_mac_addr[5]); 2413 2414 return LM_STATUS_SUCCESS; 2415 } 2416 2417 /* Gets the sriov info from shmem of ALL functions and marks if configuration is assymetric */ 2418 static void lm_get_shmem_sf_sriov_info(lm_device_t *pdev) 2419 { 2420 const lm_chip_port_mode_t port_mode = CHIP_PORT_MODE(pdev); 2421 u32_t offset = 0; 2422 u32_t val = 0; 2423 u8_t port_max = (port_mode == LM_CHIP_PORT_MODE_2)? 1 : PORT_MAX; 2424 const u8_t port = PORT_ID(pdev); 2425 u8_t port_idx = 0; 2426 u8_t sriov_enabled = 0xff; 2427 u8_t sriov_disabled = 0xff; 2428 2429 ASSERT_STATIC((FIELD_SIZE(struct shm_dev_info, port_hw_config)/FIELD_SIZE(struct shm_dev_info, port_hw_config[0])) >= max(PORT_MAX,1)); 2430 2431 if (CHIP_IS_E1x(pdev)) 2432 { 2433 pdev->hw_info.sriov_info.shmem_num_vfs_in_pf = 0; 2434 pdev->hw_info.sriov_info.b_pf_asymetric_configuration = FALSE; 2435 2436 return; 2437 } 2438 2439 for (port_idx = 0; port_idx < port_max; port_idx++) 2440 { 2441 offset = OFFSETOF(shmem_region_t,dev_info.port_hw_config[port_idx].pf_allocation); 2442 LM_SHMEM_READ(pdev, offset, &val); 2443 2444 val = (val & PORT_HW_CFG_NUMBER_OF_VFS_MASK) >> PORT_HW_CFG_NUMBER_OF_VFS_SHIFT; 2445 2446 if (0 == val) 2447 { 2448 sriov_disabled = 1; 2449 } 2450 else 2451 { 2452 sriov_enabled = 1; 2453 } 2454 2455 if (port_idx == port) 2456 { 2457 pdev->hw_info.sriov_info.shmem_num_vfs_in_pf = val; 2458 } 2459 } 2460 2461 2462 /* check if assymteric configuration...basically we initialize both params to 0xff, so the only way they can both be 2463 * the same is if one of the ports was enabled and one was disabled... 
*/ 2464 if (sriov_disabled == sriov_enabled) 2465 { 2466 pdev->hw_info.sriov_info.b_pf_asymetric_configuration = TRUE; 2467 } 2468 else 2469 { 2470 pdev->hw_info.sriov_info.b_pf_asymetric_configuration = FALSE; 2471 } 2472 2473 } 2474 2475 static void lm_get_shmem_mf_sriov_info(lm_device_t *pdev) 2476 { 2477 u32_t offset = 0; 2478 u32_t val = 0; 2479 u8_t func = 0; 2480 const u8_t abs_func = ABS_FUNC_ID(pdev); 2481 u8_t abs_func_idx = 0; 2482 u8_t sriov_enabled = 0xff; 2483 u8_t sriov_disabled = 0xff; 2484 2485 ASSERT_STATIC((FIELD_SIZE(struct mf_cfg, func_mf_config) / FIELD_SIZE(struct mf_cfg, func_mf_config[0])) == E2_FUNC_MAX*2); 2486 2487 if (CHIP_IS_E1x(pdev)) 2488 { 2489 pdev->hw_info.sriov_info.shmem_num_vfs_in_pf = 0; 2490 pdev->hw_info.sriov_info.b_pf_asymetric_configuration = FALSE; 2491 2492 return; 2493 } 2494 2495 for (func = 0; func < E2_FUNC_MAX; func++) 2496 { 2497 abs_func_idx = PATH_ID(pdev) + func*2; 2498 2499 offset = OFFSETOF(mf_cfg_t, func_mf_config[abs_func_idx].pf_allocation); 2500 LM_MFCFG_READ(pdev, offset,&val); 2501 val = (val & FUNC_MF_CFG_NUMBER_OF_VFS_MASK) >> FUNC_MF_CFG_NUMBER_OF_VFS_SHIFT; 2502 2503 if (0 == val) 2504 { 2505 sriov_disabled = 1; 2506 } 2507 else 2508 { 2509 sriov_enabled = 1; 2510 } 2511 2512 if (abs_func_idx == abs_func) 2513 { 2514 pdev->hw_info.sriov_info.shmem_num_vfs_in_pf = val; 2515 } 2516 } 2517 2518 2519 /* check if assymteric configuration...basically we initialize both params to 0xff, so the only way they can both be 2520 * the same is if one of the ports was enabled and one was disabled... */ 2521 if (sriov_disabled == sriov_enabled) 2522 { 2523 pdev->hw_info.sriov_info.b_pf_asymetric_configuration = TRUE; 2524 } 2525 else 2526 { 2527 pdev->hw_info.sriov_info.b_pf_asymetric_configuration = FALSE; 2528 } 2529 2530 } 2531 2532 2533 static lm_status_t lm_get_shmem_mac_info(lm_device_t *pdev) 2534 { 2535 lm_status_t lm_status = LM_STATUS_SUCCESS; 2536 2537 if (pdev->hw_info.mf_info.mf_mode == SINGLE_FUNCTION) 2538 { 2539 lm_status = lm_get_shmem_sf_mac_info(pdev); 2540 } 2541 else 2542 { 2543 lm_status = lm_get_shmem_mf_mac_info(pdev); 2544 } 2545 2546 return lm_status; 2547 } 2548 2549 static void lm_get_shmem_sriov_info(lm_device_t *pdev) 2550 { 2551 const u32_t bc_rev = LM_GET_BC_REV_MAJOR(pdev); 2552 2553 if (CHIP_IS_E1x(pdev) || (bc_rev < BC_REV_IE_SRIOV_SUPPORTED)) 2554 { 2555 return; 2556 } 2557 2558 if (pdev->hw_info.mf_info.mf_mode == SINGLE_FUNCTION) 2559 { 2560 lm_get_shmem_sf_sriov_info(pdev); 2561 } 2562 else 2563 { 2564 lm_get_shmem_mf_sriov_info(pdev); 2565 } 2566 } 2567 2568 static void lm_get_shmem_fw_flow_control(lm_device_t *pdev) 2569 { 2570 u32_t func_ext_cfg = 0; 2571 2572 // cq57766 2573 // if this static assert fails consider adding the new mode to the if 2574 // and read the l2_fw_flow_ctrl from the shmem in the new mode also 2575 ASSERT_STATIC(MAX_MF_MODE == 4); 2576 // l2_fw_flow_ctrl is read from the shmem in multi-function mode in E2 and above. 2577 // In all other cases this parameter is read from the registry. 
2578 // We read this parameter from the registry in E1.5 multi-function since 57711 boot code does not have the struct func_ext_cfg 2579 if (((pdev->hw_info.mf_info.mf_mode == MULTI_FUNCTION_SI) || 2580 (pdev->hw_info.mf_info.mf_mode == MULTI_FUNCTION_AFEX)) && 2581 (!CHIP_IS_E1x(pdev))) 2582 { 2583 LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_ext_config[ABS_FUNC_ID(pdev)].func_cfg), &func_ext_cfg); 2584 if (GET_FLAGS(func_ext_cfg, MACP_FUNC_CFG_PAUSE_ON_HOST_RING)) 2585 { 2586 pdev->params.l2_fw_flow_ctrl = 1; 2587 } 2588 else 2589 { 2590 pdev->params.l2_fw_flow_ctrl = 0; 2591 } 2592 } 2593 } 2594 2595 /** 2596 * @Description 2597 * This function is responsible for reading all the data 2598 * that the driver needs before loading from the shmem. 2599 * 2600 * @param pdev 2601 * 2602 * @return lm_status_t 2603 */ 2604 lm_status_t lm_get_shmem_info(lm_device_t *pdev) 2605 { 2606 lm_status_t lm_status = LM_STATUS_SUCCESS; 2607 u32_t val = 0; 2608 2609 lm_shmem_set_default(pdev); 2610 2611 val = lm_get_shmem_base_addr(pdev); 2612 if (!val) 2613 { 2614 DbgMessage(pdev, WARNi, "NO MCP\n"); 2615 return lm_get_shmem_info_no_mcp_bypass(pdev); 2616 } 2617 2618 pdev->hw_info.mcp_detected = 1; 2619 pdev->hw_info.shmem_base = val; 2620 2621 pdev->hw_info.shmem_base2 = REG_RD(pdev, PATH_ID(pdev) ? MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0); 2622 pdev->hw_info.mf_cfg_base = lm_get_shmem_mf_cfg_base(pdev); 2623 2624 DbgMessage(pdev, WARNi, "MCP Up Detected. shmem_base=0x%x shmem_base2=0x%x mf_cfg_offset=0x%x\n", 2625 pdev->hw_info.shmem_base, pdev->hw_info.shmem_base2, pdev->hw_info.mf_cfg_base); 2626 2627 lm_status = lm_verify_validity_map( pdev ); 2628 if(LM_STATUS_SUCCESS != lm_status ) 2629 { 2630 DbgMessage(pdev, FATAL, "lm_get_shmem_info: Shmem signature not present.\n"); 2631 pdev->hw_info.mcp_detected = 0; 2632 return LM_STATUS_SUCCESS; 2633 } 2634 2635 /* bc rev */ 2636 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t,dev_info.bc_rev),&val); 2637 pdev->hw_info.bc_rev = val; 2638 DbgMessage(pdev, INFORMi, "bc_rev %d\n",val); 2639 2640 lm_status = lm_get_shmem_shared_hw_config(pdev); 2641 if (lm_status != LM_STATUS_SUCCESS) 2642 { 2643 DbgMessage(pdev, WARNi, "lm_get_shmem_shared_hw_config returned lm_status=%d\n", lm_status); 2644 return lm_status; 2645 } 2646 2647 lm_status = lm_get_shmem_port_hw_config(pdev); 2648 if (lm_status != LM_STATUS_SUCCESS) 2649 { 2650 DbgMessage(pdev, WARNi, "lm_get_shmem_port_hw_config returned lm_status=%d\n", lm_status); 2651 return lm_status; 2652 } 2653 2654 /* Check License for toe/rdma/iscsi */ 2655 #ifdef _LICENSE_H 2656 lm_status = lm_get_shmem_license_info(pdev); 2657 if (lm_status != LM_STATUS_SUCCESS) 2658 { 2659 DbgMessage(pdev, WARNi, "lm_get_shmem_license_info returned lm_status=%d\n", lm_status); 2660 return lm_status; 2661 } 2662 #endif 2663 /* get mf config parameters */ 2664 if (IS_MF_MODE_CAPABLE(pdev) && (pdev->hw_info.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE)) 2665 { 2666 lm_status = lm_get_shmem_mf_cfg_info(pdev); 2667 if (lm_status != LM_STATUS_SUCCESS) 2668 { 2669 DbgMessage(pdev, WARNi, "lm_get_shmem_mf_cfg_info returned lm_status=%d\n", lm_status); 2670 return lm_status; 2671 } 2672 } 2673 else if (FUNC_ID(pdev) != PORT_ID(pdev)) 2674 { 2675 DbgMessage(pdev, WARNi, "Illegal to load func %d of port %d on non MF mode capable device\n"); 2676 return LM_STATUS_FAILURE; 2677 } 2678 2679 lm_get_shmem_sriov_info(pdev); 2680 2681 lm_status = lm_get_shmem_mac_info(pdev); 2682 2683 lm_get_shmem_fw_flow_control(pdev); 2684 2685 return lm_status; 2686 } 2687 
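/**
 * @Description
 *      Fills the elink link parameters from the information gathered in
 *      hw_info: shmem/shmem2 bases (or the no-MCP workaround address when
 *      the MCP is absent), the LFA base when the MFW exposes it, the chip id,
 *      and the emulation-only MAC-disable feature flags.
 *
 * @param pdev
 */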
2688 void init_link_params(lm_device_t *pdev) 2689 { 2690 u32_t val = 0; 2691 u32_t feat_val = 0; 2692 const u8_t port = PORT_ID(pdev); 2693 2694 pdev->params.link.port = port; 2695 pdev->params.link.lfa_base = 0; 2696 pdev->params.link.shmem_base = NO_MCP_WA_CLC_SHMEM; 2697 pdev->params.link.shmem2_base= NO_MCP_WA_CLC_SHMEM; 2698 2699 if (pdev->hw_info.mcp_detected) 2700 { 2701 pdev->params.link.shmem_base = pdev->hw_info.shmem_base; 2702 pdev->params.link.shmem2_base= pdev->hw_info.shmem_base2; 2703 2704 // Only if LFA is supported in MFW 2705 if (LM_SHMEM2_HAS(pdev,lfa_host_addr[port])) 2706 { 2707 LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t, lfa_host_addr[port]), &pdev->params.link.lfa_base); 2708 } 2709 } 2710 2711 pdev->params.link.chip_id = pdev->hw_info.chip_id; 2712 pdev->params.link.cb = pdev; 2713 2714 ///TODO remove - the initialization in lm_mcp_cmd_init should be enough, but BC versions are still in flux. 2715 if(pdev->hw_info.mf_info.mf_mode == MULTI_FUNCTION_AFEX) //we can't use IS_MF_NIV_MODE because params.mf_mode is not initalized yet. 2716 { 2717 SET_FLAGS( pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX ); 2718 } 2719 2720 if (CHIP_REV_IS_SLOW(pdev)) 2721 { 2722 val = CHIP_BONDING(pdev); 2723 DbgMessage(pdev, WARN, "init_link_params: chip bond id is 0x%x\n",val); 2724 2725 if (pdev->hw_info.chip_port_mode == LM_CHIP_PORT_MODE_4) 2726 { 2727 feat_val |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 2728 } 2729 else if (val & 0x4) 2730 { 2731 // force to work with emac 2732 if (CHIP_IS_E3(pdev)) 2733 { 2734 pdev->params.link.req_line_speed[0] = ELINK_SPEED_1000; 2735 feat_val |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 2736 } 2737 else 2738 { 2739 feat_val |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 2740 } 2741 } 2742 else if (val & 0x8) 2743 { 2744 if (CHIP_IS_E3(pdev)) 2745 { 2746 feat_val |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 2747 } 2748 else 2749 { 2750 feat_val |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 2751 } 2752 } 2753 /* Disable EMAC for E3 and above */ 2754 if (val & 2) 2755 { 2756 feat_val |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 2757 } 2758 2759 SET_FLAGS(pdev->params.link.feature_config_flags, feat_val); 2760 } 2761 } 2762 2763 /** lm_init_cam_params 2764 * set cam/mac parameters 2765 * 2766 * cam mapping is dynamic, we only set sizes... 2767 * 2768 */ 2769 static void lm_init_cam_params(lm_device_t *pdev) 2770 { 2771 /* FIXME: remove once constants are in hsi file */ 2772 #define LM_CAM_SIZE_EMUL (5) /*5 per vnic also in single function mode (real cam size on emulation is 20 per port) */ 2773 #define LM_MC_TABLE_SIZE_EMUL (1) 2774 #define LM_CAM_SIZE_EMUL_E2 (40) 2775 2776 u16_t mc_credit; 2777 u16_t uc_credit; 2778 u8_t b_is_asic = CHIP_REV_IS_ASIC(pdev); 2779 u8_t num_ports = 2; 2780 u8_t num_funcs; 2781 2782 /* set CAM parameters according to EMUL/FPGA or ASIC + Chip*/ 2783 mm_mem_zero(pdev->params.uc_table_size, sizeof(pdev->params.uc_table_size)); 2784 mm_mem_zero(pdev->params.mc_table_size, sizeof(pdev->params.mc_table_size)); 2785 2786 if (CHIP_IS_E1(pdev)) 2787 { 2788 pdev->params.cam_size = b_is_asic? MAX_MAC_CREDIT_E1 / num_ports : LM_CAM_SIZE_EMUL; 2789 2790 mc_credit = b_is_asic? LM_MC_NDIS_TABLE_SIZE : LM_MC_TABLE_SIZE_EMUL; 2791 uc_credit = pdev->params.cam_size - mc_credit; /* E1 multicast is in CAM */ 2792 2793 /* init unicast table entires */ 2794 pdev->params.uc_table_size[LM_CLI_IDX_ISCSI] = 1; 2795 pdev->params.uc_table_size[LM_CLI_IDX_NDIS] = uc_credit - 1; /* - one for iscsi... 
*/ 2796 2797 /* init multicast table entires */ 2798 pdev->params.mc_table_size[LM_CLI_IDX_NDIS] = mc_credit; 2799 2800 DbgMessage(pdev, INFORMi, "uc_table_size[ndis]=%d, uc_table_size[ndis]=%d, mc_table_size[ndis]=%d\n", 2801 pdev->params.uc_table_size[LM_CLI_IDX_NDIS], pdev->params.uc_table_size[LM_CLI_IDX_ISCSI], 2802 pdev->params.mc_table_size[LM_CLI_IDX_NDIS]); 2803 2804 } 2805 else if (CHIP_IS_E1H(pdev)) 2806 { 2807 pdev->params.cam_size = b_is_asic? MAX_MAC_CREDIT_E1H / num_ports: LM_CAM_SIZE_EMUL; 2808 pdev->params.cam_size = pdev->params.cam_size / pdev->params.vnics_per_port; 2809 uc_credit = pdev->params.cam_size; 2810 2811 /* init unicast table entires */ 2812 pdev->params.uc_table_size[LM_CLI_IDX_ISCSI] = 1; 2813 pdev->params.uc_table_size[LM_CLI_IDX_NDIS] = uc_credit - 1; /* - one for iscsi... */ 2814 2815 /* init multicast table entires */ 2816 pdev->params.mc_table_size[LM_CLI_IDX_NDIS] = LM_MC_NDIS_TABLE_SIZE; 2817 2818 DbgMessage(pdev, INFORMi, "uc_table_size[ndis]=%d, uc_table_size[ndis]=%d, mc_table_size[ndis]=%d\n", 2819 pdev->params.uc_table_size[LM_CLI_IDX_NDIS], pdev->params.uc_table_size[LM_CLI_IDX_ISCSI], 2820 pdev->params.mc_table_size[LM_CLI_IDX_NDIS]); 2821 } 2822 else if (CHIP_IS_E2E3(pdev)) 2823 { 2824 num_ports = (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)? 2 : 1; 2825 num_funcs = VNICS_PER_PATH(pdev); 2826 if (num_funcs > 1) 2827 { 2828 pdev->params.cam_size = b_is_asic? ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(pdev))/ num_funcs + GET_NUM_VFS_PER_PF(pdev)): LM_CAM_SIZE_EMUL_E2; 2829 } 2830 else 2831 { 2832 pdev->params.cam_size = b_is_asic? MAX_MAC_CREDIT_E2 : LM_CAM_SIZE_EMUL_E2; 2833 } 2834 uc_credit = pdev->params.cam_size; 2835 2836 /* init unicast table entires */ 2837 pdev->params.uc_table_size[LM_CLI_IDX_ISCSI] = 1; 2838 pdev->params.uc_table_size[LM_CLI_IDX_FCOE] = 1; 2839 pdev->params.uc_table_size[LM_CLI_IDX_NDIS] = uc_credit - 2; /* - the two above... */ 2840 2841 /* init multicast table entires */ 2842 pdev->params.mc_table_size[LM_CLI_IDX_NDIS] = LM_MC_NDIS_TABLE_SIZE; 2843 pdev->params.mc_table_size[LM_CLI_IDX_FCOE] = LM_MC_FCOE_TABLE_SIZE; 2844 2845 DbgMessage(pdev, INFORMi, "uc_table_size[ndis]=%d, uc_table_size[ndis]=%d, uc_table_size[fcoe]=%d, mc_table_size[ndis]=%d, mc_table_size[fcoe]=%d\n", 2846 pdev->params.uc_table_size[LM_CLI_IDX_NDIS], pdev->params.uc_table_size[LM_CLI_IDX_ISCSI], 2847 pdev->params.uc_table_size[LM_CLI_IDX_FCOE], 2848 pdev->params.mc_table_size[LM_CLI_IDX_NDIS], pdev->params.mc_table_size[LM_CLI_IDX_FCOE]); 2849 } 2850 else 2851 { 2852 DbgBreakIfAll("New Chip?? initialize cam params!\n"); 2853 } 2854 2855 /* override CAM parameters for chips later than E1 */ 2856 if (IS_PFDEV(pdev)) 2857 { 2858 pdev->params.base_offset_in_cam_table = ((num_ports == 2)? FUNC_ID(pdev) : VNIC_ID(pdev)) * LM_CAM_SIZE(pdev); 2859 } 2860 else if (IS_CHANNEL_VFDEV(pdev)) 2861 { 2862 pdev->params.base_offset_in_cam_table = 0; 2863 pdev->params.mc_table_size[LM_CLI_IDX_NDIS] = 0; /* Will be filled later on acquire response (HW_CHANNEL)*/ 2864 } 2865 } 2866 2867 /* 2868 * \brief Initialize pdev->params members 2869 * 2870 * This function initializes the various pdev->params members, depending 2871 * on chip technology/implementation: fpga, emul or asic (default). 2872 * 2873 * The function may also be used to validate these parameters. 2874 * 2875 * \param[in,out] pdev 2876 * \param[in] validate flag to indicate desired operation. 
2877 * 2878 * \return success/failure indication 2879 */ 2880 2881 static lm_status_t lm_init_params(lm_device_t *pdev, u8_t validate) 2882 { 2883 typedef struct _param_entry_t 2884 { 2885 /* Ideally, we want to save the address of the parameter here. 2886 * However, some compiler will not allow us to dynamically 2887 * initialize the pointer to a parameter in the table below. 2888 * As an alternative, we will save the offset to the parameter 2889 * from pdev device structure. */ 2890 u32_t offset; 2891 /* Parameter default value. */ 2892 u32_t asic_default; 2893 u32_t fpga_default; 2894 u32_t emulation_default; 2895 /* Limit checking is diabled if min and max are zeros. */ 2896 u32_t min; 2897 u32_t max; 2898 } param_entry_t; 2899 #define _OFFSET(_name) (OFFSETOF(lm_device_t, params._name)) 2900 #define PARAM_VAL(_pdev, _entry) \ 2901 (*((u32_t *) ((u8_t *) (_pdev) + (_entry)->offset))) 2902 #define SET_PARAM_VAL(_pdev, _entry, _val) \ 2903 *((u32_t *) ((u8_t *) (_pdev) + (_entry)->offset)) = (_val) 2904 static param_entry_t param_list[] = 2905 { 2906 /* asic fpga emul 2907 offset default default default min max */ 2908 { _OFFSET(mtu[LM_CLI_IDX_NDIS]), 9216, 9216, 9216, 1500, 9216 }, 2909 { _OFFSET(mtu[LM_CLI_IDX_ISCSI]), 9216, 9216, 9216, 1500, 9216 }, 2910 { _OFFSET(mtu[LM_CLI_IDX_FCOE]), 9216, 9216, 9216, 1500, 9216 }, 2911 // { _OFFSET(mtu[LM_CLI_IDX_RDMA]), LM_MTU_INVALID_VALUE, LM_MTU_INVALID_VALUE, LM_MTU_INVALID_VALUE, LM_MTU_INVALID_VALUE, LM_MTU_INVALID_VALUE }, 2912 { _OFFSET(mtu[LM_CLI_IDX_OOO]), 9216, 9216, 9216, 1500, 9216 }, 2913 { _OFFSET(mtu[LM_CLI_IDX_FWD]), 9216, 9216, 9216, 1500, 9216 }, 2914 { _OFFSET(mtu_max), 9216, 9216, 9216, 1500, 9216 }, 2915 { _OFFSET(rcv_buffer_offset), 0, 0, 0, 0, 9000 }, 2916 { _OFFSET(l2_rx_desc_cnt[LM_CLI_IDX_NDIS]), 200, 200, 200, 0, 32767 }, 2917 { _OFFSET(l2_rx_desc_cnt[LM_CLI_IDX_FCOE]), 200, 200, 200, 0, 32767 }, 2918 { _OFFSET(l2_rx_desc_cnt[LM_CLI_IDX_OOO]), 500, 500, 500, 0, 32767 }, 2919 /* The maximum page count is chosen to prevent us from having 2920 * more than 32767 pending entries at any one time. */ 2921 { _OFFSET(l2_tx_bd_page_cnt[LM_CLI_IDX_NDIS]), 2, 2, 2, 1, 127 }, 2922 { _OFFSET(l2_tx_bd_page_cnt[LM_CLI_IDX_FCOE]), 2, 2, 2, 1, 127 }, 2923 { _OFFSET(l2_tx_coal_buf_cnt[LM_CLI_IDX_NDIS]), 0, 0, 0, 0, 20 }, 2924 { _OFFSET(l2_tx_coal_buf_cnt[LM_CLI_IDX_FCOE]), 0, 0, 0, 0, 20 }, 2925 { _OFFSET(l2_tx_bd_page_cnt[LM_CLI_IDX_FWD]) , 2, 2, 2, 1, 127 }, 2926 /* NirV: still not supported in ediag, being set in the windows mm */ 2927 // { _OFFSET(l2_rx_desc_cnt[LM_CLI_IDX_ISCSI]), 200, 200, 200, 0, 32767 }, 2928 // 2929 // /* The maximum page count is chosen to prevent us from having 2930 // * more than 32767 pending entries at any one time. 
*/ 2931 // { _OFFSET(l2_tx_bd_page_cnt[LM_CLI_IDX_ISCSI]), 2, 2, 2, 1, 127 }, 2932 // { _OFFSET(l2_tx_coal_buf_cnt[LM_CLI_IDX_ISCSI]), 0, 0, 0, 0, 20 }, 2933 // { _OFFSET(l2_rx_bd_page_cnt[LM_CLI_IDX_ISCSI]), 1, 1, 1, 1, 127 }, 2934 { _OFFSET(test_mode), 0, 0, 0, 0, 0 }, 2935 { _OFFSET(ofld_cap), 0, 0, 0, 0, 0 }, 2936 { _OFFSET(wol_cap), 0, 0, 0, 0, 0 }, 2937 { _OFFSET(i2c_interval_sec), 0, 0, 0, 0, 1000 }, 2938 { _OFFSET(flow_ctrl_cap), 0, 0, 0, 0, 0x80000000 }, 2939 { _OFFSET(eee_policy), LM_EEE_CONTROL_NVRAM, LM_EEE_CONTROL_NVRAM, LM_EEE_CONTROL_NVRAM, LM_EEE_CONTROL_HIGH, LM_EEE_CONTROL_NVRAM }, // registry values are 0-5 for this 2940 { _OFFSET(req_medium), 0xff00, 0x00ff, 0x00ff, 0, 0xfffff }, 2941 { _OFFSET(interrupt_mode), LM_INT_MODE_INTA, LM_INT_MODE_INTA, LM_INT_MODE_INTA, LM_INT_MODE_INTA, LM_INT_MODE_MIMD}, 2942 { _OFFSET(igu_access_mode), INTR_BLK_ACCESS_IGUMEM, INTR_BLK_ACCESS_IGUMEM, INTR_BLK_ACCESS_IGUMEM, INTR_BLK_ACCESS_GRC, INTR_BLK_ACCESS_IGUMEM}, 2943 { _OFFSET(sw_config), 4, 4, 4, 0, 4}, 2944 { _OFFSET(selective_autoneg), 0, 0, 0, 0, 0 }, 2945 { _OFFSET(autogreeen), LM_AUTOGREEEN_NVRAM, LM_AUTOGREEEN_NVRAM, LM_AUTOGREEEN_NVRAM, LM_AUTOGREEEN_DISABLED, LM_AUTOGREEEN_NVRAM }, 2946 { _OFFSET(wire_speed), 1, 0, 0, 0, 0 }, 2947 { _OFFSET(phy_int_mode), 2, 2, 2, 0, 0 }, 2948 { _OFFSET(link_chng_mode), 2, 2, 2, 0, 0 }, 2949 // TODO add correct values here 2950 { _OFFSET(max_func_connections), 1024, 1024, 1024, 0, 500000}, 2951 #ifdef VF_INVOLVED 2952 { _OFFSET(max_func_toe_cons), 310, 310, 310, 0, 500000}, 2953 #else 2954 { _OFFSET(max_func_toe_cons), 750, 750, 750, 0, 500000}, 2955 #endif 2956 { _OFFSET(max_func_rdma_cons), 10, 10, 10, 0, 500000}, 2957 { _OFFSET(max_func_iscsi_cons), 128, 128, 128, 0, 500000}, 2958 { _OFFSET(max_func_fcoe_cons), 64, 64, 20, 0, 500000}, 2959 { _OFFSET(context_line_size), LM_CONTEXT_SIZE, LM_CONTEXT_SIZE, LM_CONTEXT_SIZE, 0, LM_CONTEXT_SIZE }, 2960 { _OFFSET(context_waste_size), 0, 0, 0, 0, 1024 }, 2961 { _OFFSET(num_context_in_page), 4, 4, 4, 0, 128}, 2962 { _OFFSET(client_page_size), 0x1000, 0x1000, 0x1000,0x1000, 0x20000 }, 2963 { _OFFSET(elt_page_size), 0x1000, 0x1000, 0x1000,0x1000, 0x20000 }, 2964 { _OFFSET(ilt_client_page_size), 0x1000, 0x1000, 0x1000,0x1000, 0x20000 }, 2965 { _OFFSET(cfc_last_lcid), 0xff, 0xff, 0xff, 0x1, 0xff }, 2966 { _OFFSET(override_rss_chain_cnt), 0, 0, 0, 0, 16 }, 2967 // network type and max cwnd 2968 { _OFFSET(network_type), LM_NETOWRK_TYPE_WAN, LM_NETOWRK_TYPE_WAN, LM_NETOWRK_TYPE_WAN,LM_NETOWRK_TYPE_LAN, LM_NETOWRK_TYPE_WAN }, 2969 { _OFFSET(max_cwnd_wan), 12500000, 12500000, 12500000,12500000, 12500000 }, 2970 { _OFFSET(max_cwnd_lan), 1250000 , 1250000, 1250000, 1250000, 1250000 }, 2971 // cid allocation mode 2972 { _OFFSET(cid_allocation_mode), LM_CID_ALLOC_DELAY , LM_CID_ALLOC_DELAY, LM_CID_ALLOC_DELAY,LM_CID_ALLOC_DELAY, LM_CID_ALLOC_NUM_MODES}, 2973 // interrupt coalesing configuration 2974 { _OFFSET(int_coalesing_mode), LM_INT_COAL_PERIODIC_SYNC, LM_INT_COAL_NONE, LM_INT_COAL_NONE, 1, LM_INT_COAL_NUM_MODES }, 2975 { _OFFSET(int_per_sec_rx[0]), 5000, 5000, 5000, 1, 200000 }, 2976 { _OFFSET(int_per_sec_rx[1]), 5000, 5000, 5000, 1, 200000 }, 2977 { _OFFSET(int_per_sec_rx[2]), 5000, 5000, 5000, 1, 200000 }, 2978 { _OFFSET(int_per_sec_rx[3]), 5000, 5000, 5000, 1, 200000 }, 2979 { _OFFSET(int_per_sec_tx[0]), 7500, 7500, 7500, 1, 200000 }, 2980 { _OFFSET(int_per_sec_tx[1]), 3800, 3800, 3800, 1, 200000 }, 2981 { _OFFSET(int_per_sec_tx[2]), 3800, 3800, 3800, 1, 200000 }, 2982 { _OFFSET(int_per_sec_tx[3]), 
3800, 3800, 3800, 1, 200000 }, 2983 // VF interrupt coalesing configuration 2984 { _OFFSET(vf_int_per_sec_rx[LM_VF_INT_LOW_IDX]), 5000, 5000, 5000, 1, 200000 }, 2985 { _OFFSET(vf_int_per_sec_rx[LM_VF_INT_MEDIUM_IDX]), 10000, 5000, 5000, 1, 200000 }, 2986 { _OFFSET(vf_int_per_sec_rx[LM_VF_INT_HIGH_IDX]), 20000, 5000, 5000, 1, 200000 }, 2987 { _OFFSET(vf_int_per_sec_tx[LM_VF_INT_LOW_IDX]), 3800, 3800, 3800, 1, 200000 }, 2988 { _OFFSET(vf_int_per_sec_tx[LM_VF_INT_MEDIUM_IDX]), 8000, 3800, 3800, 1, 200000 }, 2989 { _OFFSET(vf_int_per_sec_tx[LM_VF_INT_HIGH_IDX]), 16000, 3800, 3800, 1, 200000 }, 2990 2991 { _OFFSET(enable_dynamic_hc[0]), 1, 1, 1, 0, 1 }, 2992 { _OFFSET(enable_dynamic_hc[1]), 1, 1, 1, 0, 1 }, 2993 { _OFFSET(enable_dynamic_hc[2]), 1, 1, 1, 0, 1 }, 2994 { _OFFSET(enable_dynamic_hc[3]), 0, 0, 0, 0, 1 }, 2995 { _OFFSET(hc_timeout0[SM_RX_ID][0]), 12, 12, 12, 1, 0xff }, /* (20K int/sec assuming no more btr) */ 2996 { _OFFSET(hc_timeout1[SM_RX_ID][0]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 2997 { _OFFSET(hc_timeout2[SM_RX_ID][0]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 2998 { _OFFSET(hc_timeout3[SM_RX_ID][0]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 2999 { _OFFSET(hc_timeout0[SM_RX_ID][1]), 6, 6, 6, 1, 0xff }, /* (40K int/sec assuming no more btr) */ 3000 { _OFFSET(hc_timeout1[SM_RX_ID][1]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3001 { _OFFSET(hc_timeout2[SM_RX_ID][1]), 120, 120, 120, 1, 0xff }, /* (2K int/sec assuming no more btr) */ 3002 { _OFFSET(hc_timeout3[SM_RX_ID][1]), 240, 240, 240, 1, 0xff }, /* (1K int/sec assuming no more btr) */ 3003 { _OFFSET(hc_timeout0[SM_RX_ID][2]), 6, 6, 6, 1, 0xff }, /* (40K int/sec assuming no more btr) */ 3004 { _OFFSET(hc_timeout1[SM_RX_ID][2]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3005 { _OFFSET(hc_timeout2[SM_RX_ID][2]), 120, 120, 120, 1, 0xff }, /* (2K int/sec assuming no more btr) */ 3006 { _OFFSET(hc_timeout3[SM_RX_ID][2]), 240, 240, 240, 1, 0xff }, /* (1K int/sec assuming no more btr) */ 3007 { _OFFSET(hc_timeout0[SM_RX_ID][3]), 6, 6, 6, 1, 0xff }, /* (40K int/sec assuming no more btr) */ 3008 { _OFFSET(hc_timeout1[SM_RX_ID][3]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3009 { _OFFSET(hc_timeout2[SM_RX_ID][3]), 120, 120, 120, 1, 0xff }, /* (2K int/sec assuming no more btr) */ 3010 { _OFFSET(hc_timeout3[SM_RX_ID][3]), 240, 240, 240, 1, 0xff }, /* (1K int/sec assuming no more btr) */ 3011 3012 { _OFFSET(hc_timeout0[SM_TX_ID][0]), 12, 12, 12, 1, 0xff }, /* (20K int/sec assuming no more btr) */ 3013 { _OFFSET(hc_timeout1[SM_TX_ID][0]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3014 { _OFFSET(hc_timeout2[SM_TX_ID][0]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3015 { _OFFSET(hc_timeout3[SM_TX_ID][0]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3016 { _OFFSET(hc_timeout0[SM_TX_ID][1]), 6, 6, 6, 1, 0xff }, /* (40K int/sec assuming no more btr) */ 3017 { _OFFSET(hc_timeout1[SM_TX_ID][1]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3018 { _OFFSET(hc_timeout2[SM_TX_ID][1]), 120, 120, 120, 1, 0xff }, /* (2K int/sec assuming no more btr) */ 3019 { _OFFSET(hc_timeout3[SM_TX_ID][1]), 240, 240, 240, 1, 0xff }, /* (1K int/sec assuming no more btr) */ 3020 { _OFFSET(hc_timeout0[SM_TX_ID][2]), 6, 6, 6, 1, 0xff }, /* (40K int/sec assuming no more btr) */ 3021 { _OFFSET(hc_timeout1[SM_TX_ID][2]), 12, 12, 12, 1, 0xff }, /* (20K int/sec 
assuming no more btr) */ 3022 { _OFFSET(hc_timeout2[SM_TX_ID][2]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3023 { _OFFSET(hc_timeout3[SM_TX_ID][2]), 64, 64, 64, 1, 0xff }, /* (3.75K int/sec assuming no more btr) */ 3024 { _OFFSET(hc_timeout0[SM_TX_ID][3]), 6, 6, 6, 1, 0xff }, /* (40K int/sec assuming no more btr) */ 3025 { _OFFSET(hc_timeout1[SM_TX_ID][3]), 48, 48, 48, 1, 0xff }, /* (5K int/sec assuming no more btr) */ 3026 { _OFFSET(hc_timeout2[SM_TX_ID][3]), 120, 120, 120, 1, 0xff }, /* (2K int/sec assuming no more btr) */ 3027 { _OFFSET(hc_timeout3[SM_TX_ID][3]), 240, 240, 240, 1, 0xff }, /* (1K int/sec assuming no more btr) */ 3028 3029 { _OFFSET(hc_threshold0[SM_RX_ID]), 0x2000, 0x2000, 0x2000,1, 0xffffffff }, 3030 { _OFFSET(hc_threshold1[SM_RX_ID]), 0x10000, 0x10000, 0x10000,1, 0xffffffff }, 3031 { _OFFSET(hc_threshold2[SM_RX_ID]), 0x50000, 0x50000, 0x50000,1, 0xffffffff }, 3032 3033 { _OFFSET(hc_threshold0[SM_TX_ID]), 0x2000, 0x2000, 0x2000,1, 0xffffffff }, 3034 { _OFFSET(hc_threshold1[SM_TX_ID]), 0x10000, 0x10000, 0x10000,1, 0xffffffff }, 3035 { _OFFSET(hc_threshold2[SM_TX_ID]), 0x20000, 0x20000, 0x20000,1, 0xffffffff }, 3036 3037 { _OFFSET(l2_dynamic_hc_min_bytes_per_packet), 0, 0, 0, 0, 0xffff }, 3038 // { _OFFSET(l4_hc_scaling_factor), 12, 12, 12, 0, 16 }, 3039 { _OFFSET(l4_hc_ustorm_thresh), 12, 12, 12, 12, 0xffffffff }, /* 128K */ 3040 // l4 params 3041 { _OFFSET(l4_scq_page_cnt), 2, 2, 2, 2, 127 }, /* 321 BDs are reserved to FW threshold :-( */ 3042 { _OFFSET(l4_rcq_page_cnt), 3, 3, 3, 3, 127 }, /* 398 BDs are reserved to FW threshold :-( CQ_XOFF_TH = ((65*6) + 8) = ((maximum pending incoming msgs) * (maximum completions) + (maximum ramrods)) */ 3043 { _OFFSET(l4_grq_page_cnt), 2, 2, 2, 2, 127 }, /* 65 BDs are reserved to FW threshold :-( */ 3044 { _OFFSET(l4_tx_chain_page_cnt), 2, 2, 2, 2, 127 }, 3045 { _OFFSET(l4_rx_chain_page_cnt), 2, 2, 2, 2, 127 }, 3046 { _OFFSET(l4_gen_buf_size), LM_PAGE_SIZE,LM_PAGE_SIZE,LM_PAGE_SIZE,LM_PAGE_SIZE,16*LM_PAGE_SIZE }, 3047 { _OFFSET(l4_history_cqe_cnt), 20, 20, 20, 1, 20 }, 3048 { _OFFSET(l4_ignore_grq_push_enabled), 0, 0, 0, 0, 1 }, 3049 { _OFFSET(l4cli_flags), 0, 0, 0, 0, 1 }, 3050 { _OFFSET(l4cli_ticks_per_second), 1000, 1000, 1000, 500, 10000 }, 3051 { _OFFSET(l4cli_ack_frequency), 2, 2, 2, 1, 255 }, /* default 2 segments */ 3052 { _OFFSET(l4cli_delayed_ack_ticks), 200, 200, 200, 1, 255 }, /* default 200ms */ 3053 { _OFFSET(l4cli_max_retx), 6, 6, 6, 1, 255 }, 3054 { _OFFSET(l4cli_doubt_reachability_retx),3, 3, 3, 1, 255 }, 3055 { _OFFSET(l4cli_sws_prevention_ticks), 1000, 1000, 1000, 200, 0xffffffff }, /* default 1s */ 3056 { _OFFSET(l4cli_dup_ack_threshold), 3, 3, 3, 1, 255 }, 3057 { _OFFSET(l4cli_push_ticks), 100, 100, 100, 1, 0xffffffff }, /* default 100ms */ 3058 { _OFFSET(l4cli_nce_stale_ticks), 0xffffff,0xffffff,0xffffff, 1, 0xffffffff }, 3059 { _OFFSET(l4cli_starting_ip_id), 0, 0, 0, 0, 0xffff }, 3060 { _OFFSET(keep_vlan_tag), 1 , 1, 1, 0, 1 }, 3061 //congestion managment parameters 3062 { _OFFSET(cmng_enable), 0, 0, 0, 0, 1}, 3063 { _OFFSET(cmng_rate_shaping_enable),1, 1, 1, 0, 1}, 3064 { _OFFSET(cmng_fairness_enable), 1, 1, 1, 0, 1}, 3065 // safc 3066 { _OFFSET(cmng_safc_rate_thresh), 3, 3, 3, 0, 10}, 3067 { _OFFSET(cmng_activate_safc), 0, 0, 0, 0, 1}, 3068 // fairness 3069 { _OFFSET(cmng_fair_port0_rate), 10, 10, 10, 1, 10}, 3070 { _OFFSET(cmng_eth_weight), 8, 8, 8, 0, 10}, 3071 { _OFFSET(cmng_toe_weight), 8, 8, 8, 0, 10}, 3072 { _OFFSET(cmng_rdma_weight), 8, 8, 8, 0, 10}, 3073 { 
_OFFSET(cmng_iscsi_weight), 8, 8, 8, 0, 10}, 3074 // rate shaping 3075 { _OFFSET(cmng_eth_rate), 10, 10, 10, 0, 10}, 3076 { _OFFSET(cmng_toe_rate), 10, 10, 10, 0, 10}, 3077 { _OFFSET(cmng_rdma_rate), 2, 2, 2, 0, 10}, 3078 { _OFFSET(cmng_iscsi_rate), 4, 2, 2, 0, 10}, 3079 // Demo will be removed later 3080 { _OFFSET(cmng_toe_con_number), 20, 20, 20, 0, 1024}, 3081 { _OFFSET(cmng_rdma_con_number), 2, 2, 2, 0, 1024}, 3082 { _OFFSET(cmng_iscsi_con_number), 40, 40, 40, 0, 1024}, 3083 // iscsi 3084 { _OFFSET(l5sc_max_pending_tasks), 64, 64, 64, 64, 2048}, 3085 // fcoe 3086 { _OFFSET(max_fcoe_task), 64, 64, 64, 0, 4096}, 3087 #if 0 3088 { _OFFSET(disable_patent_using), 1, 1, 1, 0, 1}, 3089 #else 3090 { _OFFSET(disable_patent_using), 0, 0, 0, 0, 1}, 3091 #endif 3092 { _OFFSET(l4_grq_filling_threshold_divider), 64, 64, 64, 2, 2048}, 3093 { _OFFSET(l4_free_cid_delay_time), 2000, 10000, 10000, 0, 10000}, 3094 { _OFFSET(preemphasis_enable), 0, 0, 0, 0, 1}, 3095 { _OFFSET(preemphasis_rx_0), 0, 0, 0, 0, 0xffff}, 3096 { _OFFSET(preemphasis_rx_1), 0, 0, 0, 0, 0xffff}, 3097 { _OFFSET(preemphasis_rx_2), 0, 0, 0, 0, 0xffff}, 3098 { _OFFSET(preemphasis_rx_3), 0, 0, 0, 0, 0xffff}, 3099 { _OFFSET(preemphasis_tx_0), 0, 0, 0, 0, 0xffff}, 3100 { _OFFSET(preemphasis_tx_1), 0, 0, 0, 0, 0xffff}, 3101 { _OFFSET(preemphasis_tx_2), 0, 0, 0, 0, 0xffff}, 3102 { _OFFSET(preemphasis_tx_3), 0, 0, 0, 0, 0xffff}, 3103 { _OFFSET(disable_pcie_nfr), 0, 0, 0, 0, 1}, 3104 { _OFFSET(debug_cap_flags), 0xffffffff, 0xffffffff, 0xffffffff, 0, 0xffffffff}, 3105 { _OFFSET(try_not_align_page_multiplied_memory), 1, 1, 1, 0, 1}, 3106 { _OFFSET(l4_limit_isles), 0, 0, 0, 0, 1}, 3107 { _OFFSET(l4_max_rcv_wnd_size), 0x100000,0x100000,0x100000, 0, 0x1000000}, 3108 { _OFFSET(ndsb_type), 1, 1, 1, 0, 2}, 3109 { _OFFSET(l4_dominance_threshold), 10, 10, 10, 0, 0xFF}, 3110 { _OFFSET(l4_max_dominance_value), 20, 20, 20, 0, 0xFF}, 3111 { _OFFSET(l4_data_integrity), 0x0, 0x0, 0x0, 0x0, 0x3}, 3112 { _OFFSET(l4_start_port), 5001, 5001, 5001, 0, 0xFFFFFFFF}, 3113 { _OFFSET(l4_num_of_ports), 50, 50, 50, 0, 0xFFFF}, 3114 { _OFFSET(l4_skip_start_bytes), 4, 4, 4, 0, 0xFFFFFFFF}, 3115 { _OFFSET(phy_priority_mode), PHY_PRIORITY_MODE_HW_DEF, PHY_PRIORITY_MODE_HW_DEF, PHY_PRIORITY_MODE_HW_DEF, PHY_PRIORITY_MODE_HW_DEF, PHY_PRIORITY_MODE_HW_PIN}, 3116 { _OFFSET(grc_timeout_max_ignore), 0, 0, 0, 0, 0xFFFFFFFF}, 3117 { _OFFSET(enable_error_recovery), 0, 0, 0, 0, 1}, 3118 { _OFFSET(validate_sq_complete), 0, 0, 0, 0, 1}, 3119 { _OFFSET(npar_vm_switching_enable),0, 0, 0, 0, 1}, 3120 { _OFFSET(flow_control_reporting_mode),LM_FLOW_CONTROL_REPORTING_MODE_DISABLED,LM_FLOW_CONTROL_REPORTING_MODE_DISABLED,LM_FLOW_CONTROL_REPORTING_MODE_DISABLED,LM_FLOW_CONTROL_REPORTING_MODE_DISABLED,LM_FLOW_CONTROL_REPORTING_MODE_ENABLED}, 3121 { _OFFSET(tpa_desc_cnt_per_chain), 0, 0, 0, 0, 0x10000}, 3122 { _OFFSET(sriov_inc_mac), 0, 0, 0, 0, 64}, 3123 { _OFFSET(e3_cos_modes), 0, 0, 0, 0, 1}, 3124 { _OFFSET(e3_network_cos_mode), 0, 0, 0, 0, 1}, 3125 { _OFFSET(fw_valid_mask), 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0, 0xFFFFFFFF}, 3126 { _OFFSET(record_sp), 0x0, 0x0, 0x0, 0, 0xf}, 3127 { 0, 0, 0, 0, 0, 0} 3128 }; // param_list 3129 3130 param_entry_t *param = NULL; 3131 size_t csize = 0; 3132 u32_t flow_control = 0; 3133 u8_t i = 0; 3134 u8_t port_base_aux_qzone = 0; 3135 u8_t base_fw_qzone_id = 0; 3136 DbgMessage(pdev, INFORMi , "### lm_init_param\n"); 3137 if (!validate) 3138 { 3139 /* Initialize the default parameters. 
*/ 3140 param = param_list; 3141 while(param->offset) 3142 { 3143 if(CHIP_REV_IS_FPGA(pdev)) 3144 { 3145 SET_PARAM_VAL(pdev, param, param->fpga_default); 3146 } 3147 else if(CHIP_REV_IS_EMUL(pdev)) 3148 { 3149 SET_PARAM_VAL(pdev, param, param->emulation_default); 3150 } 3151 else 3152 { 3153 SET_PARAM_VAL(pdev, param, param->asic_default); 3154 } 3155 param++; 3156 } 3157 pdev->params.rss_caps = (LM_RSS_CAP_IPV4 | LM_RSS_CAP_IPV6); 3158 pdev->params.rss_chain_cnt = 1; 3159 pdev->params.tss_chain_cnt = 1; 3160 if (IS_PFDEV(pdev)) 3161 { 3162 pdev->params.sb_cnt = MAX_RSS_CHAINS / pdev->params.vnics_per_port; 3163 /* base non-default status block idx - 0 in E1. 0, 4, 8 or 12 in E1H */ 3164 if (CHIP_IS_E1x(pdev)) 3165 { 3166 pdev->params.max_pf_sb_cnt = pdev->params.fw_sb_cnt = HC_SB_MAX_SB_E1X / 2 / pdev->params.vnics_per_port; 3167 pdev->params.base_fw_ndsb = FUNC_ID(pdev) * pdev->params.fw_sb_cnt; 3168 if (CHIP_IS_E1(pdev)) { 3169 pdev->params.fw_client_cnt = pdev->params.max_pf_fw_client_cnt = ETH_MAX_RX_CLIENTS_E1; 3170 } else { 3171 pdev->params.fw_client_cnt = pdev->params.max_pf_fw_client_cnt = ETH_MAX_RX_CLIENTS_E1H / pdev->params.vnics_per_port; 3172 } 3173 pdev->params.base_fw_client_id = VNIC_ID(pdev) * pdev->params.fw_client_cnt; 3174 } 3175 else 3176 { 3177 #ifdef _VBD_ 3178 // pdev->params.sb_cnt = min(LM_IGU_SB_CNT(pdev), MAX_RSS_CHAINS); 3179 pdev->params.sb_cnt = LM_IGU_SB_CNT(pdev); 3180 #endif 3181 if (pdev->params.sb_cnt > LM_IGU_SB_CNT(pdev)) { 3182 pdev->params.sb_cnt = LM_IGU_SB_CNT(pdev); 3183 } 3184 // Asymmetric resource division 3185 #ifndef LM_NUM_DSBS 3186 #define LM_NUM_DSBS 1 3187 #endif 3188 if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4) 3189 { 3190 pdev->params.base_fw_ndsb = IGU_BASE_NDSB(pdev) - (FUNC_ID(pdev) + 1)* LM_NUM_DSBS; 3191 pdev->params.fw_aux_qzone_cnt = (ETH_MAX_RX_CLIENTS_E2 - PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE) / pdev->params.vnics_per_port / 2; 3192 port_base_aux_qzone = PORT_ID(pdev)* ((ETH_MAX_RX_CLIENTS_E2 - PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE)/PORT_MAX); 3193 pdev->params.aux_fw_qzone_id = PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE + port_base_aux_qzone + VNIC_ID(pdev) * pdev->params.fw_aux_qzone_cnt; 3194 pdev->params.base_fw_client_id = pdev->params.base_fw_ndsb + FUNC_ID(pdev) * MAX_NON_RSS_FW_CLIENTS; 3195 } 3196 else 3197 { 3198 pdev->params.base_fw_ndsb = IGU_BASE_NDSB(pdev) - (VNIC_ID(pdev) + 1) * LM_NUM_DSBS; 3199 pdev->params.fw_aux_qzone_cnt = (ETH_MAX_RX_CLIENTS_E2 - PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE) / pdev->params.vnics_per_port; 3200 pdev->params.aux_fw_qzone_id = PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE + VNIC_ID(pdev) * pdev->params.fw_aux_qzone_cnt; 3201 pdev->params.base_fw_client_id = pdev->params.base_fw_ndsb + VNIC_ID(pdev) * MAX_NON_RSS_FW_CLIENTS; 3202 } 3203 pdev->params.fw_sb_cnt = LM_IGU_SB_CNT(pdev); 3204 #ifdef VF_INVOLVED 3205 pdev->params.fw_sb_cnt = pdev->params.fw_sb_cnt + lm_pf_get_vf_available_igu_blocks(pdev); 3206 if ((VNICS_PER_PORT(pdev) == 1) && (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_2)) 3207 { 3208 pdev->params.fw_client_cnt = ETH_MAX_RX_CLIENTS_E2; 3209 } 3210 else 3211 { 3212 pdev->params.fw_client_cnt = pdev->params.fw_sb_cnt + MAX_NON_RSS_FW_CLIENTS; 3213 } 3214 #else 3215 pdev->params.fw_client_cnt = pdev->params.fw_sb_cnt + MAX_NON_RSS_FW_CLIENTS; 3216 #endif 3217 pdev->params.fw_base_qzone_cnt = pdev->params.fw_sb_cnt; 3218 base_fw_qzone_id = pdev->params.base_fw_ndsb; 3219 pdev->params.max_pf_sb_cnt = LM_IGU_SB_CNT(pdev); 3220 pdev->params.max_pf_fw_client_cnt = 
pdev->params.max_pf_sb_cnt + MAX_NON_RSS_FW_CLIENTS; 3221 } 3222 DbgMessage(pdev, WARN, "SB counts(from %d): %d rss, %d max(pf), %d fw ndsbs accessible\n", 3223 pdev->params.base_fw_ndsb, pdev->params.sb_cnt, pdev->params.max_pf_sb_cnt, pdev->params.fw_sb_cnt); 3224 DbgBreakIf(pdev->params.sb_cnt > pdev->params.max_pf_sb_cnt); 3225 DbgBreakIf(pdev->params.max_pf_sb_cnt > pdev->params.fw_sb_cnt); 3226 3227 // pdev->params.base_fw_client_id = VNIC_ID(pdev) * pdev->params.fw_client_cnt; 3228 DbgMessage(pdev, WARN, "FW clients (from %d): %d max(pf), %d fw cliens accessible\n", 3229 pdev->params.base_fw_client_id, pdev->params.max_pf_fw_client_cnt, pdev->params.fw_client_cnt); 3230 if (CHIP_IS_E2E3(pdev)) { 3231 u8_t qz_idx; 3232 for (qz_idx = 0; qz_idx < pdev->params.fw_base_qzone_cnt; qz_idx++) 3233 { 3234 pdev->params.fw_qzone_id[qz_idx] = base_fw_qzone_id + qz_idx; 3235 3236 } 3237 DbgMessage(pdev, WARN, "%d base FW Q zone IDs from %d\n", pdev->params.fw_base_qzone_cnt, base_fw_qzone_id); 3238 DbgMessage(pdev, WARN, "%d aux FW Q zone IDs from %d\n", pdev->params.fw_aux_qzone_cnt, pdev->params.aux_fw_qzone_id); 3239 } 3240 // pdev->params.base_fw_client_id = VNIC_ID(pdev) * (pdev->params.sb_cnt + MAX_NON_RSS_FW_CLIENTS); 3241 /* For now, base_fw_qzone_id == base_fw_client_id, but this doesn't have to be the case... */ 3242 /* qzone-id is relevant only for E2 and therefore it is ok that we use a */ 3243 /* Todo - change once E2 client is added. */ 3244 // pdev->params.base_fw_qzone_id = pdev->params.base_fw_client_id + ETH_MAX_RX_CLIENTS_E1H*PORT_ID(pdev); 3245 /* E2 TODO: read how many sb each pf has...?? */ 3246 } else if (IS_CHANNEL_VFDEV(pdev)) { 3247 pdev->params.sb_cnt = 16; 3248 } else { 3249 pdev->params.sb_cnt = 1; 3250 } 3251 3252 pdev->params.max_rss_chains = ((IS_PFDEV(pdev) && IGU_U_NDSB_OFFSET(pdev)) ? 
min(IGU_U_NDSB_OFFSET(pdev),LM_SB_CNT(pdev)) : LM_SB_CNT(pdev)); 3253 if (pdev->params.max_rss_chains > MAX_RSS_CHAINS) 3254 { 3255 pdev->params.max_rss_chains = MAX_RSS_CHAINS; 3256 } 3257 #ifndef EDIAG 3258 if(0 == pdev->params.max_rss_chains) 3259 { 3260 DbgBreakMsg("Zero isn't a valid value for pdev->params.max_rss_chains "); 3261 return LM_STATUS_FAILURE; 3262 } 3263 #endif 3264 pdev->params.base_cam_offset = 0; 3265 /* set the clients cids that will be used by the driver */ 3266 pdev->params.map_client_to_cid[LM_CLI_IDX_NDIS] = 0; 3267 pdev->params.map_client_to_cid[LM_CLI_IDX_ISCSI] = i = LM_MAX_RSS_CHAINS(pdev); 3268 pdev->params.map_client_to_cid[LM_CLI_IDX_OOO] = ++i; 3269 pdev->params.map_client_to_cid[LM_CLI_IDX_FCOE] = ++i; 3270 pdev->params.map_client_to_cid[LM_CLI_IDX_FWD] = ++i; 3271 pdev->params.start_mp_chain = ++i; 3272 3273 // pdev->params.map_client_to_cid[LM_CLI_IDX_RDMA] = ++i; 3274 // FCoE is not supported in E1 and we have only 18 clients in E1 3275 // so we OOO client gets 'priority' over FCoE 3276 DbgBreakIf(pdev->params.map_client_to_cid[LM_CLI_IDX_OOO] > pdev->params.map_client_to_cid[LM_CLI_IDX_FCOE]); 3277 3278 /* L4 RSS */ 3279 pdev->params.l4_rss_chain_cnt = 1; 3280 pdev->params.l4_tss_chain_cnt = 1; 3281 /* set l4_rss base chain index to be the first one after l2 */ 3282 pdev->params.l4_rss_base_chain_idx = 0; 3283 if (CHIP_IS_E1x(pdev)) 3284 { 3285 pdev->params.l4_base_fw_rss_id = VNIC_ID(pdev) * pdev->params.sb_cnt; 3286 } 3287 else 3288 { 3289 pdev->params.l4_base_fw_rss_id = VNIC_ID(pdev); 3290 } 3291 /* master-pfdev needs to keep resources for its vfs, resource allocation is done first between 3292 * pfs and then each pf leaves itself 1 sb_cnt for enabling vfs. */ 3293 pdev->params.eth_align_enable = 0; 3294 lm_init_cam_params(pdev); 3295 3296 if((CHIP_REV_IS_SLOW(pdev) 3297 #ifdef DUMMY_MAC_FOR_VF 3298 || IS_VFDEV(pdev) 3299 #endif 3300 ) 3301 && 3302 (!(GET_FLAGS(pdev->hw_info.mf_info.flags,MF_INFO_VALID_MAC)))) 3303 { 3304 pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0] = 0x00; 3305 pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1] = 0x50; 3306 pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2] = 0xc2; 3307 pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3] = 0x2c; 3308 pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4] = 0x70 + (IS_PFDEV(pdev) ? 
0 : (1 + 64*PATH_ID(pdev) + ABS_VFID(pdev))); 3309 if (CHIP_IS_E1x(pdev)) 3310 { 3311 pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5] = 0x9a + 2 * FUNC_ID(pdev); 3312 } 3313 else 3314 { 3315 pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5] = 0x9a + PATH_ID(pdev)*8 + PORT_ID(pdev)*4 + VNIC_ID(pdev)*2; 3316 } 3317 3318 mm_memcpy(pdev->hw_info.iscsi_mac_addr, pdev->hw_info.mac_addr, 6); 3319 pdev->hw_info.iscsi_mac_addr[5]++; 3320 mm_memcpy(pdev->hw_info.fcoe_mac_addr, pdev->hw_info.iscsi_mac_addr, 6); 3321 pdev->hw_info.fcoe_mac_addr[5]++; 3322 lm_fcoe_set_default_wwns(pdev); 3323 } 3324 else 3325 { 3326 pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0]; 3327 pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1]; 3328 pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2]; 3329 pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3]; 3330 pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4]; 3331 pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5]; 3332 } 3333 if(CHIP_REV_IS_EMUL(pdev)) 3334 { 3335 DbgMessage(pdev, INFORMi, "Emulation is detected.\n"); 3336 pdev->params.test_mode |= TEST_MODE_IGNORE_SHMEM_SIGNATURE; 3337 pdev->params.test_mode |= TEST_MODE_LOG_REG_ACCESS; 3338 //pdev->params.test_mode |= TEST_MODE_NO_MCP; 3339 DbgMessage(pdev, INFORMi , "test mode is 0x%x \n",pdev->params.test_mode); 3340 } 3341 else 3342 { 3343 DbgMessage(pdev, INFORMi, "ASIC is detected.\n"); 3344 } 3345 if (!pdev->hw_info.mcp_detected) 3346 { 3347 pdev->params.test_mode |= TEST_MODE_NO_MCP; 3348 } 3349 flow_control = (pdev->hw_info.link_config[ELINK_INT_PHY] & PORT_FEATURE_FLOW_CONTROL_MASK); 3350 3351 switch (flow_control) 3352 { 3353 case PORT_FEATURE_FLOW_CONTROL_AUTO: 3354 pdev->params.flow_ctrl_cap = LM_FLOW_CONTROL_AUTO_PAUSE; 3355 break; 3356 case PORT_FEATURE_FLOW_CONTROL_TX: 3357 pdev->params.flow_ctrl_cap = LM_FLOW_CONTROL_TRANSMIT_PAUSE; 3358 break; 3359 case PORT_FEATURE_FLOW_CONTROL_RX: 3360 pdev->params.flow_ctrl_cap = LM_FLOW_CONTROL_RECEIVE_PAUSE; 3361 break; 3362 case PORT_FEATURE_FLOW_CONTROL_BOTH: 3363 pdev->params.flow_ctrl_cap = LM_FLOW_CONTROL_TRANSMIT_PAUSE | LM_FLOW_CONTROL_RECEIVE_PAUSE; 3364 break; 3365 case PORT_FEATURE_FLOW_CONTROL_NONE: 3366 pdev->params.flow_ctrl_cap = LM_FLOW_CONTROL_NONE; 3367 break; 3368 default: 3369 pdev->params.flow_ctrl_cap = LM_FLOW_CONTROL_NONE; 3370 break; 3371 } 3372 3373 /* 3374 * We don't know (yet...) if the PHY supportes EEE - so we cannot set params 3375 * to reflect this info. 3376 */ 3377 3378 /* L2 FW Flow control */ 3379 // cq57766 3380 // if this static assert fails consider adding the new mode to the if 3381 // and read the l2_fw_flow_ctrl from the shmem in the new mode also 3382 ASSERT_STATIC(MAX_MF_MODE == 4); 3383 if ((pdev->hw_info.mf_info.mf_mode == SINGLE_FUNCTION) || 3384 (pdev->hw_info.mf_info.mf_mode == MULTI_FUNCTION_SD) || 3385 (CHIP_IS_E1x(pdev))) 3386 { 3387 // l2_fw_flow_ctrl is read from the shmem in multi-function mode in E2 and above. 3388 // In all other cases this parameter is read from the registry. 
3389 // We read this parameter from the registry in E1.5 multi-function since 57711 boot code does not have the struct func_ext_cfg
3390 pdev->params.l2_fw_flow_ctrl = 0;
3391 }
3392 pdev->params.l4_fw_flow_ctrl = 0;
3393 pdev->params.fw_stats_init_value = TRUE;
3394
3395 pdev->params.mf_mode = pdev->hw_info.mf_info.mf_mode;
3396 if (pdev->params.mf_mode == MULTI_FUNCTION_SD)
3397 {
3398 pdev->params.sd_mode = pdev->hw_info.mf_info.sd_mode;
3399 }
3400
3401 }
3402 else
3403 {
3404 /* Make sure the parameter values are within range. */
3405 param = param_list;
3406 while(param->offset)
3407 {
3408 if(param->min != 0 || param->max != 0)
3409 {
3410 if(PARAM_VAL(pdev, param) < param->min ||
3411 PARAM_VAL(pdev, param) > param->max)
3412 {
3413 if(CHIP_REV_IS_FPGA(pdev))
3414 {
3415 SET_PARAM_VAL(pdev, param, param->fpga_default);
3416 }
3417 else if(CHIP_REV_IS_EMUL(pdev))
3418 {
3419 SET_PARAM_VAL(pdev, param, param->emulation_default);
3420 }
3421 else
3422 {
3423 SET_PARAM_VAL(pdev, param, param->asic_default);
3424 }
3425 }
3426 }
3427 param++;
3428 }
3429 /* calculate context_line_size context_waste_size */
3430 // TODO calculate number of context lines in allocation page.
3431 csize = max(sizeof(struct eth_context),sizeof(struct toe_context));
3432 //csize = max(sizeof(struct rdma_context),csize);
3433 csize = max(sizeof(struct iscsi_context),csize);
3434 DbgBreakIf(csize>1024);
3435 /* Check for a valid mac address. */
3436 if((pdev->params.mac_addr[0] == 0 &&
3437 pdev->params.mac_addr[1] == 0 &&
3438 pdev->params.mac_addr[2] == 0 &&
3439 pdev->params.mac_addr[3] == 0 &&
3440 pdev->params.mac_addr[4] == 0 &&
3441 pdev->params.mac_addr[5] == 0) || (pdev->params.mac_addr[0] & 1))
3442 {
3443 DbgMessage(pdev, WARNi, "invalid MAC address.\n");
3444 pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0];
3445 pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1];
3446 pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2];
3447 pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3];
3448 pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4];
3449 pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5];
3450 }
3451 if (CHIP_IS_E1(pdev))
3452 {
3453 if ((pdev->params.l2_fw_flow_ctrl == 1) || (pdev->params.l4_fw_flow_ctrl == 1))
3454 {
3455 DbgMessage(pdev, WARNi, "L2 FW Flow control not supported on E1\n");
3456 pdev->params.l2_fw_flow_ctrl = 0;
3457 pdev->params.l4_fw_flow_ctrl = 0;
3458 }
3459 }
3460 }
3461
3462 /* init l2 client conn param with default mtu values */
3463 for (i = 0; i < ARRSIZE(pdev->params.l2_cli_con_params); i++)
3464 {
3465 lm_cli_idx_t lm_cli_idx = LM_CHAIN_IDX_CLI(pdev, i);
3466 ASSERT_STATIC( ARRSIZE(pdev->params.l2_rx_desc_cnt) == ARRSIZE(pdev->params.mtu));
3467 if( lm_cli_idx >= ARRSIZE(pdev->params.l2_rx_desc_cnt))
3468 {
3469 // in case lm_cli_idx is above boundaries
3470 // it means that it should not be used (currently expected in MF mode)
3471 // so we skip the iteration
3472 continue;
3473 }
3474 pdev->params.l2_cli_con_params[i].mtu = pdev->params.mtu[lm_cli_idx];
3475
3476 if(i < (LM_SB_CNT(pdev) + MAX_NON_RSS_CHAINS))
3477 {
3478 pdev->params.l2_cli_con_params[i].num_rx_desc = pdev->params.l2_rx_desc_cnt[lm_cli_idx];
3479 pdev->params.l2_cli_con_params[i].attributes = LM_CLIENT_ATTRIBUTES_RX | LM_CLIENT_ATTRIBUTES_TX | LM_CLIENT_ATTRIBUTES_REG_CLI;
3480 }
3481 else
3482 {
3483 pdev->params.l2_cli_con_params[i].attributes = LM_CLIENT_ATTRIBUTES_TX;
3484 }
3485 }
3486 return LM_STATUS_SUCCESS;
3487 } /* lm_init_params */
3488
3489
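/*
 * Usage sketch (not part of the driver flow itself; it simply mirrors how
 * lm_get_dev_info below drives this routine):
 *
 *   lm_init_params(pdev, 0);     // validate==0: load FPGA/EMUL/ASIC defaults into pdev->params
 *   mm_get_user_config(pdev);    // OS/registry layer may override selected parameters
 *   lm_init_params(pdev, 1);     // validate==1: clamp out-of-range overrides back to the defaults
 */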
/*******************************************************************************
3490 * Description:
3491 *
3492 * Return:
3493 ******************************************************************************/
3494 lm_status_t
3495 lm_get_dev_info(
3496 lm_device_t *pdev)
3497 {
3498 lm_status_t lm_status = LM_STATUS_SUCCESS;
3499
3500 DbgMessage(pdev, INFORMi , "### lm_get_dev_info\n");
3501
3502 // initialize "product_version" to 0xffffffff so all platforms will have invalid values (except Windows, which will update it later)
3503 mm_memset( pdev->product_version, 0xff, sizeof(pdev->product_version) );
3504
3505 lm_status = lm_get_pcicfg_info(pdev);
3506 if(lm_status != LM_STATUS_SUCCESS)
3507 {
3508 return lm_status;
3509 }
3510
3511 lm_status = lm_get_bars_info(pdev);
3512 if(lm_status != LM_STATUS_SUCCESS)
3513 {
3514 return lm_status;
3515 }
3516 if (!IS_CHANNEL_VFDEV(pdev)) {
3517 lm_status = lm_get_chip_id_and_mode(pdev);
3518 if(lm_status != LM_STATUS_SUCCESS)
3519 {
3520 return lm_status;
3521 }
3522 }
3523 if (IS_PFDEV(pdev)) {
3524 // Get function num using the me register
3525 lm_status = lm_get_function_num(pdev);
3526 if (lm_status != LM_STATUS_SUCCESS) {
3527 return lm_status;
3528 }
3529 // initialize pointers to init arrays (can only do this after we know which chip we are...)
3530 // We want to do this here to enable IRO access before driver load (ediag/lediag). This is only done
3531 // for PFs; VFs use the PFDEV to access IRO
3532 if ( lm_set_init_arrs(pdev) != 0 ) {
3533 DbgMessage(pdev, FATAL, "Unknown chip revision\n");
3534 return LM_STATUS_UNKNOWN_ADAPTER;
3535 }
3536 } else {
3537 /* For VF, we also get the vf-id here... since we need it from configuration space */
3538 #ifdef VF_INVOLVED
3539 if (IS_VFDEV(pdev))
3540 {
3541 lm_vf_get_vf_id(pdev);
3542 }
3543 #endif
3544 }
3545
3546 #ifdef __LINUX
3547 if (lm_is_function_after_flr(pdev))
3548 {
3549 if (IS_PFDEV(pdev)) {
3550 lm_status = lm_cleanup_after_flr(pdev);
3551 if(lm_status != LM_STATUS_SUCCESS)
3552 {
3553 return lm_status;
3554 }
3555 } else {
3556 /* 8. Verify that the transaction-pending bit of each function in the PCIe Device Status Register is cleared. */
3557
3558 #ifdef __LINUX
3559 u32_t pcie_caps_offset = mm_get_cap_offset(pdev, PCI_CAP_PCIE);
3560 if (pcie_caps_offset != 0 && pcie_caps_offset != 0xFFFFFFFF) {
3561 u32_t dev_control_and_status = 0xFFFFFFFF;
3562 mm_read_pci(pdev, pcie_caps_offset + PCIE_DEV_CTRL, &dev_control_and_status);
3563 DbgMessage(pdev, FATAL,"Device Control&Status of PCIe caps is %x\n",dev_control_and_status);
3564 if (dev_control_and_status & (PCIE_DEV_STATUS_PENDING_TRANSACTION << 16)) {
3565 DbgBreak();
3566 }
3567 }
3568 #else
3569 DbgMessage(pdev, FATAL, "Function mm_get_cap_offset is not implemented yet\n");
3570 DbgBreak();
3571 #endif
3572 lm_fl_reset_clear_inprogress(pdev);
3573 }
3574 }
3575 #endif
3576
3577 if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
3578 pdev->params.max_eth_including_vfs_conns = 1 << (LM_VF_MAX_RVFID_SIZE + LM_VF_CID_WND_SIZE(pdev) + 1);
3579 } else if (IS_PFDEV(pdev)) {
3580 pdev->params.max_eth_including_vfs_conns = MAX_VF_ETH_CONS;
3581 // Registry parameters are read at a later stage.
3582 // As a result pdev->params.is_dcb_ndis_mp_en isn't valid yet.
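// Since the DCB state isn't known at this point, the sizing below assumes DCB may be enabled whenever the chip supports it.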
3583 if(IS_DCB_SUPPORTED_BY_CHIP(pdev))
3584 {
3585 // Add DCB multiple connections
3586 #ifdef _VBD_
3587 pdev->params.max_eth_including_vfs_conns += 3 * MAX_HW_CHAINS + MAX_NON_RSS_CHAINS;
3588 #else
3589 pdev->params.max_eth_including_vfs_conns += MAX_ETH_CONS;
3590 #endif
3591 }
3592 else
3593 {
3594 #ifdef _VBD_
3595 pdev->params.max_eth_including_vfs_conns += MAX_ETH_REG_CHAINS;
3596 #else
3597 pdev->params.max_eth_including_vfs_conns += MAX_ETH_REG_CONS;
3598 #endif
3599 }
3600 }
3601 else
3602 {
3603 pdev->params.max_eth_including_vfs_conns = MAX_RSS_CHAINS;
3604 }
3605 if (IS_PFDEV(pdev)) {
3606 lm_status = lm_get_sriov_info(pdev);
3607 if (lm_status != LM_STATUS_SUCCESS) {
3608 return lm_status;
3609 }
3610 lm_status = lm_get_nvm_info(pdev);
3611 if (lm_status != LM_STATUS_SUCCESS) {
3612 return lm_status;
3613 }
3614 lm_status = lm_get_shmem_info(pdev);
3615 if (lm_status != LM_STATUS_SUCCESS) {
3616 return lm_status;
3617 }
3618
3619 } else if (IS_CHANNEL_VFDEV(pdev)) { //TODO check for basic vf
3620 pdev->hw_info.mf_info.multi_vnics_mode = 0;
3621 pdev->hw_info.mf_info.vnics_per_port = 1;
3622 pdev->hw_info.mf_info.ext_id = 0xffff; /* invalid ovlan */ /* TBD - E1H: - what is the right value for Cisco? */
3623 pdev->hw_info.mcp_detected = FALSE;
3624 pdev->hw_info.chip_id = CHIP_NUM_5712E;
3625 pdev->hw_info.max_port_conns = log2_align(MAX_ETH_CONS);
3626 pdev->debug_info.ack_en[0] = 1;
3627 }
3628
3629 #ifdef VF_INVOLVED
3630 if (IS_VFDEV(pdev)) {
3631 lm_vf_enable_vf(pdev);
3632 }
3633 #endif
3634 pdev->ver_num =
3635 (LM_DRIVER_MAJOR_VER << 24) |
3636 (LM_DRIVER_MINOR_VER << 16) |
3637 (LM_DRIVER_FIX_NUM << 8) |
3638 LM_DRIVER_ENG_NUM ;
3639 mm_build_ver_string(pdev);
3640 // for debugging only (no other use)
3641 pdev->ver_num_fw = (BCM_5710_FW_MAJOR_VERSION << 24) |
3642 (BCM_5710_FW_MINOR_VERSION << 16) |
3643 (BCM_5710_FW_REVISION_VERSION<<8) |
3644 (BCM_5710_FW_ENGINEERING_VERSION) ;
3645 /* get vnic parameters */
3646 pdev->params.vnics_per_port = pdev->hw_info.mf_info.vnics_per_port;
3647 pdev->params.ovlan = VALID_OVLAN(OVLAN(pdev)) ? OVLAN(pdev) : 0; // TBD: verify it's the right value (with OfirH)
3648 pdev->params.multi_vnics_mode = pdev->hw_info.mf_info.multi_vnics_mode;
3649 pdev->params.path_has_ovlan = pdev->hw_info.mf_info.path_has_ovlan;
3650
3651 if IS_MULTI_VNIC(pdev)
3652 {
3653 lm_cmng_calc_params(pdev);
3654 }
3655
3656 if (IS_PFDEV(pdev))
3657 {
3658 // clc params
3659 init_link_params(pdev);
3660 }
3661
3662 if (IS_CHANNEL_VFDEV(pdev))
3663 {
3664 pdev->hw_info.intr_blk_info.blk_type = INTR_BLK_IGU;
3665 pdev->hw_info.intr_blk_info.blk_mode = INTR_BLK_MODE_NORM;
3666 pdev->hw_info.intr_blk_info.access_type = INTR_BLK_ACCESS_IGUMEM;
3667 }
3668 else
3669 {
3670 lm_status = lm_get_intr_blk_info(pdev);
3671 if(lm_status != LM_STATUS_SUCCESS)
3672 {
3673 return lm_status;
3674 }
3675 }
3676
3677 lm_status = lm_init_params(pdev, 0);
3678 if(lm_status != LM_STATUS_SUCCESS)
3679 {
3680 return lm_status;
3681 }
3682
3683 lm_status = lm_mcp_cmd_init(pdev);
3684 if( LM_STATUS_SUCCESS != lm_status )
3685 {
3686 // Ediag may want to update the BC version, so never fail lm_get_dev_info just because
3687 // lm_mcp_cmd_init failed.
3688 DbgMessage(pdev, FATAL, "lm_get_dev_info: mcp_cmd_init failed. lm_status=0x%x\n", lm_status);
3689 }
3690
3691 if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)
3692 {
3693 /* We're a single-function port on a multi-function path in a 4-port-mode environment...
we need to support 1G */ 3694 if (pdev->params.path_has_ovlan && !pdev->params.multi_vnics_mode) 3695 { 3696 DbgMessage(pdev, WARN, "func_id = %d Setting link speed to 1000MBPS\n", ABS_FUNC_ID(pdev)); 3697 SET_MEDIUM_SPEED(pdev->params.req_medium, LM_MEDIUM_SPEED_1000MBPS); 3698 } 3699 } 3700 3701 /* Override the defaults with user configurations. */ 3702 lm_status = mm_get_user_config(pdev); 3703 if(lm_status != LM_STATUS_SUCCESS) 3704 { 3705 return lm_status; 3706 } 3707 lm_status = lm_init_params(pdev, 1); 3708 if(lm_status != LM_STATUS_SUCCESS) 3709 { 3710 return lm_status; 3711 } 3712 DbgMessage(pdev, INFORMi , "### lm_get_dev_info exit\n"); 3713 return LM_STATUS_SUCCESS; 3714 } /* lm_get_dev_info */ 3715 3716 /* 3717 *Function Name: lm_get_port_id_from_func_abs 3718 * 3719 *Parameters: 3720 * 3721 *Description: 3722 * returns the port ID according to the func_abs_id 3723 * E1/E1.5: 3724 * Port0: 0,2,4,6 3725 * Port1: 1,3,5,7 3726 * 3727 * E2/E32P 3728 * Port0: 0,1,2,3,4,5,6,7 3729 * 3730 * E34P 3731 * Port0: 0,1,4,5 3732 * Port1: 2,3,6,7 3733 * 3734 *Returns: u8_t port_id 3735 * 3736 */ 3737 u8_t lm_get_port_id_from_func_abs( const u32_t chip_num, const lm_chip_port_mode_t lm_chip_port_mode, const u8_t abs_func ) 3738 { 3739 u8_t port_id = 0xff; 3740 u8_t modulus_res = 0; 3741 3742 do 3743 { 3744 if( CHIP_IS_E1x_PARAM( chip_num ) ) 3745 { 3746 port_id = abs_func % PORT_MAX; 3747 break; 3748 } 3749 3750 switch( lm_chip_port_mode ) 3751 { 3752 case LM_CHIP_PORT_MODE_2: 3753 { 3754 // we expect here only E2 or E3 3755 DbgBreakIf( CHIP_IS_E1x_PARAM( chip_num ) ); 3756 port_id = 0; 3757 } 3758 break; 3759 3760 case LM_CHIP_PORT_MODE_4: 3761 { 3762 modulus_res = abs_func % 4; 3763 switch (modulus_res) 3764 { 3765 case 0: 3766 case 1: 3767 port_id = 0; 3768 break; 3769 case 2: 3770 case 3: 3771 port_id = 1; 3772 break; 3773 default: 3774 break; 3775 } 3776 } 3777 break; 3778 3779 default: 3780 DbgBreakIf(TRUE); 3781 break; 3782 } // switch lm_chip_port_mode 3783 3784 }while(0); 3785 3786 return port_id; 3787 } /* lm_get_port_id_from_func_abs */ 3788 3789 /* 3790 *Function Name: lm_get_abs_func_vector 3791 * 3792 *Parameters: 3793 * 3794 *Description: 3795 * returns vector of abs_func id's upon parameters 3796 * 3797 *Returns: u32_t abs_func_vector 3798 * 3799 */ 3800 u8_t lm_get_abs_func_vector( const u32_t chip_num, const lm_chip_port_mode_t chip_port_mode, const u8_t b_multi_vnics_mode, const u8_t path_id ) 3801 { 3802 u8_t abs_func_vector = 0; 3803 3804 // TODO VF for T7.0 3805 3806 /* 3807 The following table is mapping between abs func, ports and paths 3808 3809 |-----------------------------------------------| 3810 |[#]| CHIP & Mode | PATH(s) | Port(s) | Func(s) | 3811 |---|-------------|---------|---------|---------| 3812 |[1]| E1.0 (SF) | (0) | 0,1 | (0,1) | 3813 | | E1.5 SF | | 0,1 | (0,1) | (port is same as func) 3814 |---|-------------|---------|---------|---------| 3815 |[2]| E1.5 MF | (0) | 0,1 | 0-7 | 0,1,2,3,4,5,6,7 (port is %2 of func) 3816 |---|-------------|---------|---------|---------| 3817 |[3]| E2/E32P SF | 0,1 | 0 | ---> | (Path 0) 0 | (Path 1) 1 3818 |---|-------------|---------|---------|---------| 3819 |[4]| E2/E32P MF | 0,1 | 0 | ---> | (Path 0) 0,2,4,6 | (Path 1) 1,3,5,7 3820 |---|-------------|---------|---------|---------| 3821 |[5]| E34P SF | 0,1 | 0,1 | ---> | (Path 0) 0:port0 2:port1 | (Path 1) 1:port0 3:port1 3822 |---|-------------|---------|---------|---------| 3823 |[6]| E34P MF | 0,1 | 0,1 | ---> | (Path 0) 0,4:port0 2,6:port1 | (Path 1) 1,5:port0 
3,7:port1 (57840) 3824 |---|-------------|---------|---------|---------| 3825 |[7]| E34P MF/SF | 0,1 | 0,1 | ---> | (Path 0) 0,4:port0 2:port1 | (Path 1) 1,5:port0 3:port1 (57800) 3826 |---|-------------|---------|---------|---------| 3827 */ 3828 do 3829 { 3830 // [1] 3831 if( CHIP_IS_E1x_PARAM(chip_num) && !b_multi_vnics_mode ) 3832 { 3833 SET_BIT( abs_func_vector, 0 ); 3834 SET_BIT( abs_func_vector, 1 ); 3835 break; 3836 } 3837 3838 // [2] 3839 if( CHIP_IS_E1H_PARAM(chip_num) && b_multi_vnics_mode ) 3840 { 3841 SET_BIT( abs_func_vector, 0 ); 3842 SET_BIT( abs_func_vector, 1 ); 3843 SET_BIT( abs_func_vector, 2 ); 3844 SET_BIT( abs_func_vector, 3 ); 3845 SET_BIT( abs_func_vector, 4 ); 3846 SET_BIT( abs_func_vector, 5 ); 3847 SET_BIT( abs_func_vector, 6 ); 3848 SET_BIT( abs_func_vector, 7 ); 3849 break; 3850 } 3851 3852 // If we got here chip should not be ealier than E2 3853 DbgBreakIf( CHIP_IS_E1x_PARAM(chip_num) ); 3854 3855 // [3] [4] [5] [6] 3856 switch ( chip_port_mode ) 3857 { 3858 case LM_CHIP_PORT_MODE_2: 3859 { 3860 // we expect here only E2 or E3 3861 DbgBreakIf( !CHIP_IS_E2_PARAM(chip_num) && !CHIP_IS_E3_PARAM(chip_num) ); 3862 3863 if( b_multi_vnics_mode ) 3864 { 3865 // [4] 3866 SET_BIT( abs_func_vector, (0 + path_id) ); 3867 SET_BIT( abs_func_vector, (2 + path_id) ); 3868 SET_BIT( abs_func_vector, (4 + path_id) ); 3869 SET_BIT( abs_func_vector, (6 + path_id) ); 3870 break; 3871 } 3872 else 3873 { 3874 // [3] 3875 SET_BIT( abs_func_vector, path_id ); 3876 break; 3877 } 3878 } // LM_CHIP_PORT_MODE_2 3879 break; 3880 3881 3882 case LM_CHIP_PORT_MODE_4: 3883 { 3884 if( b_multi_vnics_mode ) 3885 { 3886 // [6] 3887 if (chip_num != CHIP_NUM_57800) 3888 { 3889 SET_BIT( abs_func_vector, (0 + path_id) ); 3890 SET_BIT( abs_func_vector, (2 + path_id) ); 3891 SET_BIT( abs_func_vector, (4 + path_id) ); 3892 SET_BIT( abs_func_vector, (6 + path_id) ); 3893 3894 } 3895 // [7] In 57800 if we are multi function the other port can only be single function 3896 else 3897 { 3898 SET_BIT( abs_func_vector, (0 + path_id) ); 3899 SET_BIT( abs_func_vector, (2 + path_id) ); 3900 SET_BIT( abs_func_vector, (4 + path_id) ); 3901 } 3902 break; 3903 } 3904 else 3905 { 3906 // [5] 3907 if (chip_num != CHIP_NUM_57800) 3908 { 3909 SET_BIT( abs_func_vector, (0 + path_id) ); 3910 SET_BIT( abs_func_vector, (2 + path_id) ); 3911 } 3912 // [7] We can't really know what's on the other port, so for this case where we are 3913 // in 57800 single function, we assume multi-function and access all the functions 3914 // so this might be case [5] but we can't know this. 3915 else 3916 { 3917 SET_BIT( abs_func_vector, (0 + path_id) ); 3918 SET_BIT( abs_func_vector, (2 + path_id) ); 3919 SET_BIT( abs_func_vector, (4 + path_id) ); 3920 } 3921 break; 3922 } 3923 } // LM_CHIP_PORT_MODE_4 3924 break; 3925 3926 default: 3927 { 3928 DbgBreakIf(TRUE); 3929 break; 3930 } 3931 } // CHIP_PORT_MODE 3932 3933 }while(0); 3934 3935 return abs_func_vector; 3936 } /* lm_get_abs_func_vector */ 3937 3938 lm_status_t lm_verify_validity_map(lm_device_t *pdev) 3939 { 3940 u64_t wait_cnt = 0 ; 3941 u64_t wait_cnt_limit = 200000; // 4 seconds (ASIC) 3942 u32_t val = 0; 3943 lm_status_t lm_status = LM_STATUS_FAILURE ; 3944 if ( CHK_NULL(pdev) ) 3945 { 3946 return LM_STATUS_INVALID_PARAMETER ; 3947 } 3948 wait_cnt_limit*= (u64_t)(pdev->vars.clk_factor) ; 3949 for(wait_cnt = 0; wait_cnt < wait_cnt_limit; wait_cnt++) 3950 { 3951 LM_SHMEM_READ(pdev,OFFSETOF(shmem_region_t, validity_map[PORT_ID(pdev)]),&val); 3952 // check that shared memory is valid. 
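// Both SHR_MEM_VALIDITY_DEV_INFO and SHR_MEM_VALIDITY_MB must be set before the
// dev_info and function-mailbox sections of shmem can be trusted; we keep polling
// (up to wait_cnt_limit iterations, roughly 4 seconds on ASIC per the limit above)
// until the MCP marks them valid or the loop times out.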
3953 if((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) == (SHR_MEM_VALIDITY_DEV_INFO|SHR_MEM_VALIDITY_MB)) 3954 { 3955 lm_status = LM_STATUS_SUCCESS ; 3956 break; 3957 } 3958 mm_wait(pdev, 20); 3959 } 3960 DbgMessage(pdev, INFORMi, "lm_verify_validity_map: shmem signature %d\n",val); 3961 return lm_status ; 3962 } 3963 3964 3965 lm_status_t 3966 lm_set_cam_params(struct _lm_device_t * pdev, 3967 u32_t mac_requestors_mask, 3968 u32_t base_offset_in_cam_table, 3969 u32_t cam_size, 3970 u32_t mma_size, 3971 u32_t mc_size) 3972 { 3973 lm_status_t lm_status = LM_STATUS_SUCCESS; 3974 if (IS_VFDEV(pdev)) { 3975 return LM_STATUS_FAILURE; 3976 } 3977 if (base_offset_in_cam_table != LM_KEEP_CURRENT_CAM_VALUE) { 3978 pdev->params.base_offset_in_cam_table = (u8_t)base_offset_in_cam_table; 3979 } 3980 if (cam_size != LM_KEEP_CURRENT_CAM_VALUE) { 3981 pdev->params.cam_size = (u8_t)cam_size; 3982 } 3983 if (mc_size != LM_KEEP_CURRENT_CAM_VALUE) { 3984 if (CHIP_IS_E1(pdev)) { 3985 pdev->params.mc_table_size[LM_CLI_IDX_NDIS] =(u8_t) mc_size; 3986 } else { 3987 pdev->params.mc_table_size[LM_CLI_IDX_FCOE] = (u8_t)mc_size; 3988 } 3989 } 3990 3991 return lm_status; 3992 } /* lm_set_cam_params */ 3993 3994 /******************************************************************************* 3995 * Description: 3996 * 3997 * Return: 3998 ******************************************************************************/ 3999 void lm_cmng_calc_params(lm_device_t* pdev ) 4000 { 4001 u8_t vnic = 0; 4002 DbgBreakIf(!IS_MULTI_VNIC(pdev)); 4003 for (vnic = 0; vnic < MAX_VNIC_NUM; vnic++) 4004 { 4005 if (GET_FLAGS(pdev->hw_info.mf_info.func_mf_cfg , FUNC_MF_CFG_FUNC_HIDE)) 4006 { 4007 pdev->params.min_bw[vnic] = 0; 4008 pdev->params.max_bw[vnic] = 0; 4009 } 4010 else 4011 { 4012 pdev->params.min_bw[vnic] = pdev->hw_info.mf_info.min_bw[vnic]; 4013 pdev->params.max_bw[vnic] = pdev->hw_info.mf_info.max_bw[vnic]; 4014 } 4015 } 4016 } /* lm_cmng_calc_params */ 4017 4018 /** 4019 * @description 4020 * Calculates BW according to current linespeed and MF 4021 * configuration of the function in Mbps. 4022 * @param pdev 4023 * @param link_speed - Port rate in Mbps. 4024 * @param vnic 4025 * 4026 * @return u16 4027 * Return the max BW of the function in Mbps. 4028 */ 4029 u16_t 4030 lm_get_max_bw(IN const lm_device_t *pdev, 4031 IN const u32_t link_speed, 4032 IN const u8_t vnic) 4033 { 4034 u16_t max_bw = 0; 4035 4036 DbgBreakIf(0 == IS_MULTI_VNIC(pdev)); 4037 4038 //global vnic counter 4039 if(IS_MF_SD_MODE(pdev) || IS_MF_AFEX_MODE(pdev)) 4040 { 4041 // SD max BW in 100Mbps 4042 max_bw = pdev->params.max_bw[vnic]*100; 4043 } 4044 else 4045 { 4046 // SI max BW in percentage from the link speed. 
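// Illustrative values only: with a 10000 Mbps link and max_bw[vnic] == 25, this
// branch returns (10000 * 25) / 100 = 2500 Mbps.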
4047 DbgBreakIf(FALSE == IS_MF_SI_MODE(pdev));
4048 max_bw = (link_speed * pdev->params.max_bw[vnic])/100;
4049 }
4050 return max_bw;
4051 }
4052
4053 u8_t lm_check_if_pf_assigned_to_vm(struct _lm_device_t *pdev)
4054 {
4055 u8_t b_assigned_to_vm = FALSE;
4056
4057 switch (pdev->hw_info.pci_cfg_trust)
4058 {
4059 case PCI_CFG_NOT_TESTED_FOR_TRUST:
4060 break;
4061 case PCI_CFG_NOT_TRUSTED:
4062 b_assigned_to_vm = TRUE;
4063 break;
4064 case PCI_CFG_TRUSTED:
4065 b_assigned_to_vm = FALSE;
4066 break;
4067 }
4068 return b_assigned_to_vm;
4069 }
4070
4071 u8_t lm_is_fw_version_valid(struct _lm_device_t *pdev)
4072 {
4073 u8_t is_fw_valid = FALSE;
4074 u32_t drv_fw_ver = (BCM_5710_FW_MAJOR_VERSION) |
4075 (BCM_5710_FW_MINOR_VERSION << 8) |
4076 (BCM_5710_FW_REVISION_VERSION << 16) |
4077 (BCM_5710_FW_ENGINEERING_VERSION << 24) ;
4078 u32_t real_fw_ver = REG_RD(pdev,0x2c0000); /* Read active FW version from 1st DWORD of XSTORM params*/
4079 u32_t fw_valid_mask;
4080
4081 fw_valid_mask = SWAP_BYTES32(pdev->params.fw_valid_mask);
4082 is_fw_valid = (((drv_fw_ver ^ real_fw_ver) & fw_valid_mask) == 0);
4083 return (is_fw_valid);
4084 }
4085
4086 /*
4087 * Support for NCSI get OS driver version CQ70040
4088 */
4089
4090 /* Description: Write the client driver version
4091 * to the shmem2 region
4092 */
4093 lm_status_t
4094 lm_set_cli_drv_ver_to_shmem(struct _lm_device_t *pdev)
4095 {
4096 u32_t drv_ver_offset = OFFSETOF(shmem2_region_t,func_os_drv_ver);
4097 u32_t offset = 0;
4098 lm_status_t lm_status = LM_STATUS_SUCCESS; // Status is always SUCCESS now
4099 u32_t shmem2_size = 0;
4100 u32_t index = 0;
4101
4102 if (IS_VFDEV(pdev))
4103 {
4104 return LM_STATUS_SUCCESS;
4105 }
4106
4107 ASSERT_STATIC( sizeof(pdev->lm_cli_drv_ver_to_shmem.cli_drv_ver) == sizeof(struct os_drv_ver) );
4108
4109 offset = drv_ver_offset + (pdev->params.pfunc_mb_id * sizeof(pdev->lm_cli_drv_ver_to_shmem.cli_drv_ver));
4110
4111 DbgMessage(pdev, WARN,"offset= %d \n", offset);
4112
4113 if (pdev->hw_info.shmem_base2 != 0)
4114 {
4115 LM_SHMEM2_READ (pdev, OFFSETOF(shmem2_region_t,size), &shmem2_size);
4116 if (shmem2_size > offset)
4117 {
4118 for (index = 0; index < ARRSIZE(pdev->lm_cli_drv_ver_to_shmem.cli_drv_ver.versions); index++)
4119 {
4120 LM_SHMEM2_WRITE(pdev, offset, pdev->lm_cli_drv_ver_to_shmem.cli_drv_ver.versions[index]);
4121 offset+= sizeof( pdev->lm_cli_drv_ver_to_shmem.cli_drv_ver.versions[index] );
4122 }
4123 }
4124 }
4125
4126 return lm_status;
4127 }
4128
4129 u8_t lm_is_mac_locally_administrated(struct _lm_device_t *pdev, u8_t * mac)
4130 {
4131 u8_t res = FALSE;
4132 if (mac != NULL)
4133 {
4134 res = (mac[0] != pdev->params.mac_addr[0]) ||
4135 (mac[1] != pdev->params.mac_addr[1]) ||
4136 (mac[2] != pdev->params.mac_addr[2]) ||
4137 (mac[3] != pdev->params.mac_addr[3]) ||
4138 (mac[4] != pdev->params.mac_addr[4]) ||
4139 (mac[5] != pdev->params.mac_addr[5]);
4140 }
4141 return res;
4142 }
4143
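/*
 * Worked example for lm_is_fw_version_valid() above (a hypothetical FW version
 * 7.10.51.0 is used purely for illustration):
 *   drv_fw_ver = 7 | (10 << 8) | (51 << 16) | (0 << 24) = 0x00330a07
 *   real_fw_ver is the first DWORD of the XSTORM params area (offset 0x2c0000);
 *   the versions are accepted as matching when (drv_fw_ver ^ real_fw_ver) has no
 *   bits set under SWAP_BYTES32(params.fw_valid_mask).
 */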