/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "igc_api.h"

static s32 igc_init_nvm_params_i225(struct igc_hw *hw);
static s32 igc_init_mac_params_i225(struct igc_hw *hw);
static s32 igc_init_phy_params_i225(struct igc_hw *hw);
static s32 igc_reset_hw_i225(struct igc_hw *hw);
static s32 igc_acquire_nvm_i225(struct igc_hw *hw);
static void igc_release_nvm_i225(struct igc_hw *hw);
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw);
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw);

/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = IGC_READ_REG(hw, IGC_EECD);
	u16 size;

	DEBUGFUNC("igc_init_nvm_params_i225");

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
		     IGC_EECD_SIZE_EX_SHIFT);
	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	nvm->type = igc_nvm_eeprom_spi;

	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
			    16 : 8;

	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->ops.acquire = igc_acquire_nvm_i225;
	nvm->ops.release = igc_release_nvm_i225;
	if (igc_get_flash_presence_i225(hw)) {
		hw->nvm.type = igc_nvm_flash_hw;
		nvm->ops.read = igc_read_nvm_srrd_i225;
		nvm->ops.write = igc_write_nvm_srwr_i225;
		nvm->ops.validate = igc_validate_nvm_checksum_i225;
		nvm->ops.update = igc_update_nvm_checksum_i225;
	} else {
		hw->nvm.type = igc_nvm_invm;
		nvm->ops.write = igc_null_write_nvm;
		nvm->ops.validate = igc_null_ops_generic;
		nvm->ops.update = igc_null_ops_generic;
	}

	return IGC_SUCCESS;
}
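
/*
 * Illustrative note (derived from the code above, not from the datasheet):
 * with the size field capped at 15, word_size = 1 << 15 = 32768 words, so
 * the largest EEPROM this code will report is 64 KB.
 */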

/**
 * igc_init_mac_params_i225 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_mac_params_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225;

	DEBUGFUNC("igc_init_mac_params_i225");

	/* Initialize function pointer */
	igc_init_mac_ops_generic(hw);

	/* Set media type */
	hw->phy.media_type = igc_media_type_copper;
	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = IGC_RAR_ENTRIES_BASE;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_i225;
	/* hw initialization */
	mac->ops.init_hw = igc_init_hw_i225;
	/* link setup */
	mac->ops.setup_link = igc_setup_link_generic;
	/* check for link */
	mac->ops.check_for_link = igc_check_for_link_i225;
	/* link info */
	mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic;
	/* acquire SW_FW sync */
	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	/* release SW_FW sync */
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	dev_spec->clear_semaphore_once = true;
	mac->ops.setup_physical_interface = igc_setup_copper_link_i225;

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;

	/* multicast address update */
	mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic;

	mac->ops.write_vfta = igc_write_vfta_generic;

	return IGC_SUCCESS;
}

/**
 * igc_init_phy_params_i225 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_phy_params_i225(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = IGC_SUCCESS;
	u32 ctrl_ext;

	DEBUGFUNC("igc_init_phy_params_i225");

	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->ops.power_up = igc_power_up_phy_copper;
	phy->ops.power_down = igc_power_down_phy_copper_base;

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;

	phy->reset_delay_us = 100;

	phy->ops.acquire = igc_acquire_phy_base;
	phy->ops.check_reset_block = igc_check_reset_block_generic;
	phy->ops.commit = igc_phy_sw_reset_generic;
	phy->ops.release = igc_release_phy_base;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* Make sure the PHY is in a good state. Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val)
		goto out;

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext);
	phy->ops.read_reg = igc_read_phy_reg_gpy;
	phy->ops.write_reg = igc_write_phy_reg_gpy;

	ret_val = igc_get_phy_id(hw);
	/* Verify PHY ID and set remaining function pointers */
	switch (phy->id) {
	case I225_I_PHY_ID:
		phy->type = igc_phy_i225;
		phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225;
		phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225;
		/* TODO - complete with GPY PHY information */
		break;
	default:
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igc_reset_hw_i225 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.
 **/
static s32 igc_reset_hw_i225(struct igc_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("igc_reset_hw_i225");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);

	IGC_WRITE_REG(hw, IGC_RCTL, 0);
	IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP);
	IGC_WRITE_FLUSH(hw);

	msec_delay(10);

	ctrl = IGC_READ_REG(hw, IGC_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);

	ret_val = igc_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_READ_REG(hw, IGC_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igc_check_alt_mac_addr_generic(hw);

	return ret_val;
}

/* igc_acquire_nvm_i225 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("igc_acquire_nvm_i225");

	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);

	return ret_val;
}

/* igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
	DEBUGFUNC("igc_release_nvm_i225");

	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
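
/*
 * Typical usage of the SW/FW synchronization pair defined below (sketch
 * only; the mask depends on the resource being protected, e.g.
 * IGC_SWFW_EEP_SM for the NVM as in igc_acquire_nvm_i225() above):
 *
 *	if (igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM) == IGC_SUCCESS) {
 *		... access the shared resource ...
 *		igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
 *	}
 */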

/* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = IGC_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("igc_acquire_swfw_sync_i225");

	while (i < timeout) {
		if (igc_get_hw_semaphore_i225(hw)) {
			ret_val = -IGC_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igc_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -IGC_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}

/* igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("igc_release_swfw_sync_i225");

	while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS)
		; /* Empty */

	swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
	swfw_sync &= ~mask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);
}

/*
 * igc_setup_copper_link_i225 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link; once link is established, calls to configure collision distance
 * and flow control are made.
 */
s32 igc_setup_copper_link_i225(struct igc_hw *hw)
{
	u32 phpm_reg;
	s32 ret_val;
	u32 ctrl;

	DEBUGFUNC("igc_setup_copper_link_i225");

	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl);

	phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM);
	phpm_reg &= ~IGC_I225_PHPM_GO_LINKD;
	IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg);

	ret_val = igc_setup_copper_link_generic(hw);

	return ret_val;
}

/* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("igc_get_hw_semaphore_i225");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		if (!(swsm & IGC_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._i225.clear_semaphore_once) {
			hw->dev_spec._i225.clear_semaphore_once = false;
			igc_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = IGC_READ_REG(hw, IGC_SWSM);
				if (!(swsm & IGC_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device -\n");
			DEBUGOUT("SMBI bit is set.\n");
			return -IGC_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igc_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -IGC_ERR_NVM;
	}

	return IGC_SUCCESS;
}

/* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow Ram to read
 * @words: number of words to read
 * @data: word read from the Shadow Ram
 *
 * Reads a 16 bit word from the Shadow Ram using the EERD register.
 * Uses necessary synchronization semaphores.
 */
s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
			   u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_read_nvm_srrd_i225");

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = igc_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}

/* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 */
s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
			    u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_write_nvm_srwr_i225");

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = __igc_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}

/* __igc_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow Ram will most likely contain an invalid checksum.
 */
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("__igc_write_nvm_srwr");

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -IGC_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
		       (data[i] << IGC_NVM_RW_REG_DATA) |
		       IGC_NVM_RW_REG_START;

		IGC_WRITE_REG(hw, IGC_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (IGC_NVM_RW_REG_DONE &
			    IGC_READ_REG(hw, IGC_SRWR)) {
				ret_val = IGC_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != IGC_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 status = IGC_SUCCESS;
	s32 (*read_op_ptr)(struct igc_hw *, u16, u16, u16 *);

	DEBUGFUNC("igc_validate_nvm_checksum_i225");

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Replace the read function with one that skips grabbing
		 * the synchronization semaphores; we have already taken the
		 * semaphore here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igc_read_nvm_eerd;

		status = igc_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = IGC_ERR_SWFW_SYNC;
	}

	return status;
}

/* igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 */
s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("igc_update_nvm_checksum_i225");

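	/* Checksum convention, for reference: the words at offsets
	 * 0 .. NVM_CHECKSUM_REG - 1 plus the checksum word itself must sum
	 * to NVM_SUM (0xBABA) in 16-bit arithmetic, so the value written
	 * below is NVM_SUM minus the running sum of the data words.
	 */
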
	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != IGC_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating\n");
				DEBUGOUT("checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16)NVM_SUM - checksum;
		ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					       &checksum);
		if (ret_val != IGC_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igc_update_flash_i225(hw);
	} else {
		ret_val = IGC_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/* igc_get_flash_presence_i225 - Check if flash device is detected.
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	DEBUGFUNC("igc_get_flash_presence_i225");

	eec = IGC_READ_REG(hw, IGC_EECD);

	if (eec & IGC_EECD_FLASH_DETECTED_I225)
		ret_val = true;

	return ret_val;
}

/* igc_set_flsw_flash_burst_counter_i225 - sets FLSW NVM Burst
 * Counter in FLSWCNT register.
 *
 * @hw: pointer to the HW structure
 * @burst_counter: size in bytes of the Flash burst to read or write
 */
s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
					  u32 burst_counter)
{
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225");

	/* Validate input data */
	if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) {
		/* Write FLSWCNT - burst counter */
		IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter);
	} else {
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}
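
/*
 * Flash programming sequence used by igc_update_flash_i225() below (sketch):
 * erase the valid sector, then for every Shadow RAM word program a 2-byte
 * burst via igc_set_flsw_flash_burst_counter_i225(), issue the write command
 * for the byte address via igc_write_erase_flash_command_i225(), write the
 * word to IGC_I225_FLSWDATA, and poll for completion.
 */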

/* igc_write_erase_flash_command_i225 - write/erase to a sector
 * region on a given address.
 *
 * @hw: pointer to the HW structure
 * @opcode: opcode to be used for the write command
 * @address: the offset to write into the FLASH image
 */
s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
				       u32 address)
{
	u32 flswctl = 0;
	s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_write_erase_flash_command_i225");

	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	/* Polling done bit on FLSWCTL register */
	while (timeout) {
		if (flswctl & IGC_FLSWCTL_DONE)
			break;
		usec_delay(5);
		flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Flash transaction was not done\n");
		return -IGC_ERR_NVM;
	}

	/* Build and issue command on FLSWCTL register */
	flswctl = address | opcode;
	IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl);

	/* Check if issued command is valid on FLSWCTL register */
	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	if (!(flswctl & IGC_FLSWCTL_CMDV)) {
		DEBUGOUT("Write flash command failed\n");
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}

/* igc_update_flash_i225 - Commit EEPROM to the flash
 * If fw_valid_bit is set, FW is active. Setting the FLUPD bit in the EEC
 * register makes the FW load the internal shadow RAM into the flash.
 * Otherwise, fw_valid_bit is 0. If FL_SECU.block_protected_sw = 0,
 * then FW is not active, so SW is responsible for the shadow RAM dump.
 *
 * @hw: pointer to the HW structure
 */
s32 igc_update_flash_i225(struct igc_hw *hw)
{
	u16 current_offset_data = 0;
	u32 block_sw_protect = 1;
	u16 base_address = 0x0;
	u32 i, fw_valid_bit;
	u16 current_offset;
	s32 ret_val = 0;
	u32 flup;

	DEBUGFUNC("igc_update_flash_i225");

	block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) &
			   IGC_FLSECU_BLK_SW_ACCESS_I225;
	fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) &
		       IGC_FWSM_FW_VALID_I225;
	if (fw_valid_bit) {
		ret_val = igc_pool_flash_update_done_i225(hw);
		if (ret_val == -IGC_ERR_NVM) {
			DEBUGOUT("Flash update time out\n");
			goto out;
		}

		flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225;
		IGC_WRITE_REG(hw, IGC_EECD, flup);

		ret_val = igc_pool_flash_update_done_i225(hw);
		if (ret_val == IGC_SUCCESS)
			DEBUGOUT("Flash update complete\n");
		else
			DEBUGOUT("Flash update time out\n");
	} else if (!block_sw_protect) {
		/* FW is not active and security protection is disabled,
		 * therefore SW is in charge of the shadow RAM dump.
		 * Check which sector is valid. If sector 0 is valid,
		 * base address remains 0x0.
		 * Otherwise, sector 1 is valid and its base address
		 * is 0x1000.
		 */
		if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
			base_address = 0x1000;

		/* Valid sector erase */
		ret_val = igc_write_erase_flash_command_i225(hw,
						  IGC_I225_ERASE_CMD_OPCODE,
						  base_address);
		if (!ret_val) {
			DEBUGOUT("Sector erase failed\n");
			goto out;
		}

		current_offset = base_address;

		/* Write */
		for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
			/* Set burst write length */
			ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
									0x2);
			if (ret_val != IGC_SUCCESS)
				break;

			/* Set address and opcode */
			ret_val = igc_write_erase_flash_command_i225(hw,
						  IGC_I225_WRITE_CMD_OPCODE,
						  2 * current_offset);
			if (ret_val != IGC_SUCCESS)
				break;

			ret_val = igc_read_nvm_eerd(hw, current_offset,
						    1, &current_offset_data);
			if (ret_val) {
				DEBUGOUT("Failed to read from EEPROM\n");
				goto out;
			}

			/* Write current_offset_data to the FLSWDATA register */
			IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
				      current_offset_data);
			current_offset++;

			/* Wait till operation has finished */
			ret_val = igc_poll_eerd_eewr_done(hw,
							  IGC_NVM_POLL_READ);
			if (ret_val)
				break;

			usec_delay(1000);
		}
	}
out:
	return ret_val;
}

/* igc_pool_flash_update_done_i225 - Poll FLUDONE status.
 * @hw: pointer to the HW structure
 */
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
	s32 ret_val = -IGC_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("igc_pool_flash_update_done_i225");

	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
		reg = IGC_READ_REG(hw, IGC_EECD);
		if (reg & IGC_EECD_FLUDONE_I225) {
			ret_val = IGC_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds.
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
	u16 speed, duplex;
	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
	s32 size;

	DEBUGFUNC("igc_set_ltr_i225");

	/* If we do not have link, LTR thresholds are zero. */
	if (link) {
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);

		/* Check if using copper interface with EEE enabled or if the
		 * link speed is 10 Mbps.
		 */
		if ((hw->phy.media_type == igc_media_type_copper) &&
		    !(hw->dev_spec._i225.eee_disable) &&
		    (speed != SPEED_10)) {
			/* EEE enabled, so send LTRMAX threshold. */
			ltrc = IGC_READ_REG(hw, IGC_LTRC) |
			       IGC_LTRC_EEEMS_EN;
			IGC_WRITE_REG(hw, IGC_LTRC, ltrc);

			/* Calculate tw_system (nsec). */
			if (speed == SPEED_100) {
				tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
					      IGC_TW_SYSTEM_100_MASK) >>
					     IGC_TW_SYSTEM_100_SHIFT) * 500;
			} else {
				tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
					     IGC_TW_SYSTEM_1000_MASK) * 500;
			}
		} else {
			tw_system = 0;
		}

		/* Get the Rx packet buffer size. */
		size = IGC_READ_REG(hw, IGC_RXPBS) &
		       IGC_RXPBS_SIZE_I225_MASK;

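		/* Illustrative example of the threshold math that follows
		 * (numbers assumed for illustration only): an effective
		 * buffer of 262144 bits drained at 1000 Mbps gives
		 * ltr_min = 1000 * 262144 / 1000 = 262144 nsec; since
		 * 262144 / 1024 = 256 < 1024, the 1024 nsec scale is chosen
		 * and the encoded minimum threshold is 256.
		 */
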
		/* Calculations vary based on DMAC settings. */
		if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
			size -= (IGC_READ_REG(hw, IGC_DMACR) &
				 IGC_DMACR_DMACTHR_MASK) >>
				IGC_DMACR_DMACTHR_SHIFT;
			/* Convert size to bits. */
			size *= 1024 * 8;
		} else {
			/* Convert size to bytes, subtract the MTU, and then
			 * convert the size to bits.
			 */
			size *= 1024;
			size -= hw->dev_spec._i225.mtu;
			size *= 8;
		}

		if (size < 0) {
			DEBUGOUT1("Invalid effective Rx buffer size %d\n",
				  size);
			return -IGC_ERR_CONFIG;
		}

		/* Calculate the thresholds. Since speed is in Mbps, simplify
		 * the calculation by multiplying size/speed by 1000 for result
		 * to be in nsec before dividing by the scale in nsec. Set the
		 * scale such that the LTR threshold fits in the register.
		 */
		ltr_min = (1000 * size) / speed;
		ltr_max = ltr_min + tw_system;
		scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
			    IGC_LTRMINV_SCALE_32768;
		scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
			    IGC_LTRMAXV_SCALE_32768;
		ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
		ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;

		/* Only write the LTR thresholds if they differ from before. */
		ltrv = IGC_READ_REG(hw, IGC_LTRMINV);
		if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
			ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
			       (scale_min << IGC_LTRMINV_SCALE_SHIFT);
			IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv);
		}

		ltrv = IGC_READ_REG(hw, IGC_LTRMAXV);
		if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
			ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
			       (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
			IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv);
		}
	}

	return IGC_SUCCESS;
}

/* igc_check_for_link_i225 - Check for link
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 */
s32 igc_check_for_link_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link = false;

	DEBUGFUNC("igc_check_for_link_i225");

	/* We only want to go out to the PHY registers to see if
	 * Auto-Neg has completed and/or if our link status has
	 * changed. The get_link_status flag is set upon receiving
	 * a Link Status Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = IGC_SUCCESS;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igc_check_downshift_generic(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		goto out;

	/* Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igc_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");
out:
	/* Now that we are aware of our link settings, we can set the LTR
	 * thresholds.
	 */
	ret_val = igc_set_ltr_i225(hw, link);

	return ret_val;
}

/* igc_init_function_pointers_i225 - Init func ptrs.
 * @hw: pointer to the HW structure
 *
 * Called to initialize all function pointers and parameters.
 */
void igc_init_function_pointers_i225(struct igc_hw *hw)
{
	igc_init_mac_ops_generic(hw);
	igc_init_phy_ops_generic(hw);
	igc_init_nvm_ops_generic(hw);
	hw->mac.ops.init_params = igc_init_mac_params_i225;
	hw->nvm.ops.init_params = igc_init_nvm_params_i225;
	hw->phy.ops.init_params = igc_init_phy_params_i225;
}

/* igc_init_hw_i225 - Init hw for I225
 * @hw: pointer to the HW structure
 *
 * Called to initialize hw for i225 hw family.
 */
s32 igc_init_hw_i225(struct igc_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("igc_init_hw_i225");

	ret_val = igc_init_hw_base(hw);
	return ret_val;
}

/*
 * igc_set_d0_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Note: since I225 does not actually support LPLU, this function
 * simply enables/disables 1G and 2.5G speeds in D0.
 */
s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active)
{
	u32 data;

	DEBUGFUNC("igc_set_d0_lplu_state_i225");

	data = IGC_READ_REG(hw, IGC_I225_PHPM);

	if (active) {
		data |= IGC_I225_PHPM_DIS_1000;
		data |= IGC_I225_PHPM_DIS_2500;
	} else {
		data &= ~IGC_I225_PHPM_DIS_1000;
		data &= ~IGC_I225_PHPM_DIS_2500;
	}

	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
	return IGC_SUCCESS;
}

/*
 * igc_set_d3_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D3 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Note: since I225 does not actually support LPLU, this function
 * simply enables/disables 100M, 1G and 2.5G speeds in D3.
 */
s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active)
{
	u32 data;

	DEBUGFUNC("igc_set_d3_lplu_state_i225");

	data = IGC_READ_REG(hw, IGC_I225_PHPM);

	if (active) {
		data |= IGC_I225_PHPM_DIS_100_D3;
		data |= IGC_I225_PHPM_DIS_1000_D3;
		data |= IGC_I225_PHPM_DIS_2500_D3;
	} else {
		data &= ~IGC_I225_PHPM_DIS_100_D3;
		data &= ~IGC_I225_PHPM_DIS_1000_D3;
		data &= ~IGC_I225_PHPM_DIS_2500_D3;
	}

	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
	return IGC_SUCCESS;
}

/**
 * igc_set_eee_i225 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 **/
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
		     bool adv100M)
{
	u32 ipcnfg, eeer;

	DEBUGFUNC("igc_set_eee_i225");

	if (hw->mac.type != igc_i225 ||
	    hw->phy.media_type != igc_media_type_copper)
		goto out;
	ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG);
	eeer = IGC_READ_REG(hw, IGC_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._i225.eee_disable)) {
		u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU);

		if (adv100M)
			ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;

		if (adv2p5G)
			ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;

		eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			 IGC_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
	} else {
		ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
			    IGC_IPCNFG_EEE_100M_AN);
		eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			  IGC_EEER_LPI_FC);
	}
	IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg);
	IGC_WRITE_REG(hw, IGC_EEER, eeer);
	IGC_READ_REG(hw, IGC_IPCNFG);
	IGC_READ_REG(hw, IGC_EEER);
out:

	return IGC_SUCCESS;
}