/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "igc_api.h"

static s32 igc_init_nvm_params_i225(struct igc_hw *hw);
static s32 igc_init_mac_params_i225(struct igc_hw *hw);
static s32 igc_init_phy_params_i225(struct igc_hw *hw);
static s32 igc_reset_hw_i225(struct igc_hw *hw);
static s32 igc_acquire_nvm_i225(struct igc_hw *hw);
static void igc_release_nvm_i225(struct igc_hw *hw);
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw);
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw);

/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = IGC_READ_REG(hw, IGC_EECD);
	u16 size;

	DEBUGFUNC("igc_init_nvm_params_i225");

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
	    IGC_EECD_SIZE_EX_SHIFT);
	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	nvm->type = igc_nvm_eeprom_spi;

	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? 16 : 8;

	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	nvm->ops.acquire = igc_acquire_nvm_i225;
	nvm->ops.release = igc_release_nvm_i225;
	if (igc_get_flash_presence_i225(hw)) {
		hw->nvm.type = igc_nvm_flash_hw;
		nvm->ops.read = igc_read_nvm_srrd_i225;
		nvm->ops.write = igc_write_nvm_srwr_i225;
		nvm->ops.validate = igc_validate_nvm_checksum_i225;
		nvm->ops.update = igc_update_nvm_checksum_i225;
	} else {
		hw->nvm.type = igc_nvm_invm;
		nvm->ops.write = igc_null_write_nvm;
		nvm->ops.validate = igc_null_ops_generic;
		nvm->ops.update = igc_null_ops_generic;
	}

	return IGC_SUCCESS;
}
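
/*
 * Worked illustration (not driver logic): with the EECD size field decoded
 * as 3 and NVM_WORD_SIZE_BASE_SHIFT assumed to be 6, as in the related
 * e1000 base code, the shift value becomes 9 and word_size is
 * 1 << 9 = 512 16-bit words.  The cap at 15 keeps word_size at or below
 * 1 << 15 = 32768 words, the largest supported EEPROM size.
 */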

/**
 * igc_init_mac_params_i225 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_mac_params_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225;

	DEBUGFUNC("igc_init_mac_params_i225");

	/* Initialize function pointer */
	igc_init_mac_ops_generic(hw);

	/* Set media type */
	hw->phy.media_type = igc_media_type_copper;
	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = IGC_RAR_ENTRIES_BASE;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_i225;
	/* hw initialization */
	mac->ops.init_hw = igc_init_hw_i225;
	/* link setup */
	mac->ops.setup_link = igc_setup_link_generic;
	/* check for link */
	mac->ops.check_for_link = igc_check_for_link_i225;
	/* link info */
	mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic;
	/* acquire SW_FW sync */
	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	/* release SW_FW sync */
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	dev_spec->clear_semaphore_once = true;
	mac->ops.setup_physical_interface = igc_setup_copper_link_i225;

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;

	/* multicast address update */
	mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic;

	mac->ops.write_vfta = igc_write_vfta_generic;

	return IGC_SUCCESS;
}

/**
 * igc_init_phy_params_i225 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igc_init_phy_params_i225(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_init_phy_params_i225");

	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->ops.power_up = igc_power_up_phy_copper;
	phy->ops.power_down = igc_power_down_phy_copper_base;

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;

	phy->reset_delay_us = 100;

	phy->ops.acquire = igc_acquire_phy_base;
	phy->ops.check_reset_block = igc_check_reset_block_generic;
	phy->ops.release = igc_release_phy_base;
	phy->ops.reset = igc_phy_hw_reset_generic;
	phy->ops.read_reg = igc_read_phy_reg_gpy;
	phy->ops.write_reg = igc_write_phy_reg_gpy;

	/* Make sure the PHY is in a good state. Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val)
		goto out;

	ret_val = igc_get_phy_id(hw);
	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I225_I_PHY_ID:
		phy->type = igc_phy_i225;
		phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225;
		phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225;
		/* TODO - complete with GPY PHY information */
		break;
	default:
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
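
/*
 * Usage sketch (hypothetical caller, for illustration only): an attach path
 * would typically wire the family function pointers first and then run each
 * init_params hook installed by igc_init_function_pointers_i225() later in
 * this file:
 *
 *	igc_init_function_pointers_i225(hw);
 *	ret_val = hw->mac.ops.init_params(hw);
 *	if (ret_val == IGC_SUCCESS)
 *		ret_val = hw->nvm.ops.init_params(hw);
 *	if (ret_val == IGC_SUCCESS)
 *		ret_val = hw->phy.ops.init_params(hw);
 */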

/**
 * igc_reset_hw_i225 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.
 **/
static s32 igc_reset_hw_i225(struct igc_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("igc_reset_hw_i225");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);

	IGC_WRITE_REG(hw, IGC_RCTL, 0);
	IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP);
	IGC_WRITE_FLUSH(hw);

	msec_delay(10);

	ctrl = IGC_READ_REG(hw, IGC_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);

	ret_val = igc_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_READ_REG(hw, IGC_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igc_check_alt_mac_addr_generic(hw);

	return ret_val;
}

/* igc_acquire_nvm_i225 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("igc_acquire_nvm_i225");

	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);

	return ret_val;
}

/* igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
	DEBUGFUNC("igc_release_nvm_i225");

	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
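
/*
 * Access pattern sketch (illustrative, mirroring igc_read_nvm_srrd_i225()
 * below; not a new API): NVM accesses in this file bracket the register work
 * with the acquire and release hooks installed above:
 *
 *	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
 *		ret_val = igc_read_nvm_eerd(hw, offset, 1, &word);
 *		hw->nvm.ops.release(hw);
 *	} else {
 *		ret_val = IGC_ERR_SWFW_SYNC;
 *	}
 */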

/* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = IGC_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("igc_acquire_swfw_sync_i225");

	while (i < timeout) {
		if (igc_get_hw_semaphore_i225(hw)) {
			ret_val = -IGC_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igc_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -IGC_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}

/* igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("igc_release_swfw_sync_i225");

	while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS)
		; /* Empty */

	swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
	swfw_sync &= ~mask;
	IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore_generic(hw);
}

/*
 * igc_setup_copper_link_i225 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we
 * check for link; once link is established, collision distance and flow
 * control are configured.
 */
s32 igc_setup_copper_link_i225(struct igc_hw *hw)
{
	u32 phpm_reg;
	s32 ret_val;
	u32 ctrl;

	DEBUGFUNC("igc_setup_copper_link_i225");

	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	IGC_WRITE_REG(hw, IGC_CTRL, ctrl);

	phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM);
	phpm_reg &= ~IGC_I225_PHPM_GO_LINKD;
	IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg);

	ret_val = igc_setup_copper_link_generic(hw);

	return ret_val;
}

/* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("igc_get_hw_semaphore_i225");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		if (!(swsm & IGC_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._i225.clear_semaphore_once) {
			hw->dev_spec._i225.clear_semaphore_once = false;
			igc_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = IGC_READ_REG(hw, IGC_SWSM);
				if (!(swsm & IGC_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device -\n");
			DEBUGOUT("SMBI bit is set.\n");
			return -IGC_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = IGC_READ_REG(hw, IGC_SWSM);
		IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igc_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -IGC_ERR_NVM;
	}

	return IGC_SUCCESS;
}

/* igc_read_nvm_srrd_i225 - Reads Shadow RAM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow RAM to read
 * @words: number of words to read
 * @data: word read from the Shadow RAM
 *
 * Reads a 16 bit word from the Shadow RAM using the EERD register.
 * Uses necessary synchronization semaphores.
 */
s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
			   u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_read_nvm_srrd_i225");

	/* We cannot hold the synchronization semaphores for too long,
	 * because of the forceful takeover procedure. However, it is more
	 * efficient to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = igc_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}

/* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 */
s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
			    u16 *data)
{
	s32 status = IGC_SUCCESS;
	u16 i, count;

	DEBUGFUNC("igc_write_nvm_srwr_i225");

	/* We cannot hold the synchronization semaphores for too long,
	 * because of the forceful takeover procedure. However, it is more
	 * efficient to write in bursts than synchronizing access for each
	 * word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
			status = __igc_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = IGC_ERR_SWFW_SYNC;
		}

		if (status != IGC_SUCCESS)
			break;
	}

	return status;
}

/* __igc_write_nvm_srwr - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow RAM will most likely contain an invalid checksum.
 */
static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("__igc_write_nvm_srwr");

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) ||
	    (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -IGC_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
		       (data[i] << IGC_NVM_RW_REG_DATA) |
		       IGC_NVM_RW_REG_START;

		IGC_WRITE_REG(hw, IGC_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (IGC_NVM_RW_REG_DONE &
			    IGC_READ_REG(hw, IGC_SRWR)) {
				ret_val = IGC_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != IGC_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 status = IGC_SUCCESS;
	s32 (*read_op_ptr)(struct igc_hw *, u16, u16, u16 *);

	DEBUGFUNC("igc_validate_nvm_checksum_i225");

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Replace the read function with one that skips the
		 * semaphore grabbing for a while; we already hold the
		 * semaphore here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igc_read_nvm_eerd;

		status = igc_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = IGC_ERR_SWFW_SYNC;
	}

	return status;
}

/* igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next, commit the EEPROM data onto the flash.
 */
s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("igc_update_nvm_checksum_i225");

	/* Read the first word from the EEPROM.
	 * If this times out or fails, do not continue or we could be in for
	 * a very long wait while every EEPROM read fails.
	 */
	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != IGC_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating\n");
				DEBUGOUT("checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16)NVM_SUM - checksum;
		ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					       &checksum);
		if (ret_val != IGC_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igc_update_flash_i225(hw);
	} else {
		ret_val = IGC_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/* igc_get_flash_presence_i225 - Check if flash device is detected.
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	DEBUGFUNC("igc_get_flash_presence_i225");

	eec = IGC_READ_REG(hw, IGC_EECD);

	if (eec & IGC_EECD_FLASH_DETECTED_I225)
		ret_val = true;

	return ret_val;
}

/* igc_set_flsw_flash_burst_counter_i225 - sets FLSW NVM Burst
 * Counter in FLSWCNT register.
 *
 * @hw: pointer to the HW structure
 * @burst_counter: size in bytes of the Flash burst to read or write
 */
s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
					  u32 burst_counter)
{
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225");

	/* Validate input data */
	if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) {
		/* Write FLSWCNT - burst counter */
		IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter);
	} else {
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}
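
/*
 * Illustration (mirrors the software flash-dump path in
 * igc_update_flash_i225() below): each shadow RAM word is committed by
 * programming a 2-byte burst, issuing a write command at the byte address,
 * and then supplying the data word:
 *
 *	igc_set_flsw_flash_burst_counter_i225(hw, 0x2);
 *	igc_write_erase_flash_command_i225(hw, IGC_I225_WRITE_CMD_OPCODE,
 *	    2 * current_offset);
 *	IGC_WRITE_REG(hw, IGC_I225_FLSWDATA, current_offset_data);
 */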

/* igc_write_erase_flash_command_i225 - write/erase to a sector
 * region on a given address.
 *
 * @hw: pointer to the HW structure
 * @opcode: opcode to be used for the write command
 * @address: the offset to write into the FLASH image
 */
s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
				       u32 address)
{
	u32 flswctl = 0;
	s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
	s32 ret_val = IGC_SUCCESS;

	DEBUGFUNC("igc_write_erase_flash_command_i225");

	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	/* Polling done bit on FLSWCTL register */
	while (timeout) {
		if (flswctl & IGC_FLSWCTL_DONE)
			break;
		usec_delay(5);
		flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Flash transaction was not done\n");
		return -IGC_ERR_NVM;
	}

	/* Build and issue command on FLSWCTL register */
	flswctl = address | opcode;
	IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl);

	/* Check if issued command is valid on FLSWCTL register */
	flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
	if (!(flswctl & IGC_FLSWCTL_CMDV)) {
		DEBUGOUT("Write flash command failed\n");
		ret_val = IGC_ERR_INVALID_ARGUMENT;
	}

	return ret_val;
}

/* igc_update_flash_i225 - Commit EEPROM to the flash
 * If fw_valid_bit is set, FW is active. Setting the FLUPD bit in the EEC
 * register makes the FW load the internal shadow RAM into the flash.
 * Otherwise, fw_valid_bit is 0. If FL_SECU.block_protected_sw = 0,
 * then FW is not active, so the SW is responsible for the shadow RAM dump.
 *
 * @hw: pointer to the HW structure
 */
s32 igc_update_flash_i225(struct igc_hw *hw)
{
	u16 current_offset_data = 0;
	u32 block_sw_protect = 1;
	u16 base_address = 0x0;
	u32 i, fw_valid_bit;
	u16 current_offset;
	s32 ret_val = 0;
	u32 flup;

	DEBUGFUNC("igc_update_flash_i225");

	block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) &
			   IGC_FLSECU_BLK_SW_ACCESS_I225;
	fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) &
		       IGC_FWSM_FW_VALID_I225;
	if (fw_valid_bit) {
		ret_val = igc_pool_flash_update_done_i225(hw);
		if (ret_val == -IGC_ERR_NVM) {
			DEBUGOUT("Flash update time out\n");
			goto out;
		}

		flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225;
		IGC_WRITE_REG(hw, IGC_EECD, flup);

		ret_val = igc_pool_flash_update_done_i225(hw);
		if (ret_val == IGC_SUCCESS)
			DEBUGOUT("Flash update complete\n");
		else
			DEBUGOUT("Flash update time out\n");
	} else if (!block_sw_protect) {
		/* FW is not active and security protection is disabled.
		 * Therefore, SW is in charge of the shadow RAM dump.
		 * Check which sector is valid. If sector 0 is valid, the
		 * base address remains 0x0.
		 * Otherwise, sector 1 is valid and its base address is
		 * 0x1000.
		 */
		if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
			base_address = 0x1000;

		/* Valid sector erase */
		ret_val = igc_write_erase_flash_command_i225(hw,
						  IGC_I225_ERASE_CMD_OPCODE,
						  base_address);
		if (ret_val) {
			DEBUGOUT("Sector erase failed\n");
			goto out;
		}

		current_offset = base_address;

		/* Write */
		for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
			/* Set burst write length */
			ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
									0x2);
			if (ret_val != IGC_SUCCESS)
				break;

			/* Set address and opcode */
			ret_val = igc_write_erase_flash_command_i225(hw,
						  IGC_I225_WRITE_CMD_OPCODE,
						  2 * current_offset);
			if (ret_val != IGC_SUCCESS)
				break;

			ret_val = igc_read_nvm_eerd(hw, current_offset,
						    1, &current_offset_data);
			if (ret_val) {
				DEBUGOUT("Failed to read from EEPROM\n");
				goto out;
			}

			/* Write current_offset_data to FLSWDATA register */
			IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
				      current_offset_data);
			current_offset++;

			/* Wait till operation has finished */
			ret_val = igc_poll_eerd_eewr_done(hw,
							  IGC_NVM_POLL_READ);
			if (ret_val)
				break;

			usec_delay(1000);
		}
	}
out:
	return ret_val;
}

/* igc_pool_flash_update_done_i225 - Poll FLUDONE status.
 * @hw: pointer to the HW structure
 */
s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
	s32 ret_val = -IGC_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("igc_pool_flash_update_done_i225");

	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
		reg = IGC_READ_REG(hw, IGC_EECD);
		if (reg & IGC_EECD_FLUDONE_I225) {
			ret_val = IGC_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds.
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
	u16 speed, duplex;
	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
	s32 size;

	DEBUGFUNC("igc_set_ltr_i225");

	/* If we do not have link, LTR thresholds are zero. */
	if (link) {
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);

		/* Check if using copper interface with EEE enabled or if the
		 * link speed is 10 Mbps.
		 */
		if ((hw->phy.media_type == igc_media_type_copper) &&
		    !(hw->dev_spec._i225.eee_disable) &&
		    (speed != SPEED_10)) {
			/* EEE enabled, so send LTRMAX threshold. */
			ltrc = IGC_READ_REG(hw, IGC_LTRC) |
			       IGC_LTRC_EEEMS_EN;
			IGC_WRITE_REG(hw, IGC_LTRC, ltrc);

			/* Calculate tw_system (nsec). */
			if (speed == SPEED_100) {
				tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
					      IGC_TW_SYSTEM_100_MASK) >>
					     IGC_TW_SYSTEM_100_SHIFT) * 500;
			} else {
				tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
					     IGC_TW_SYSTEM_1000_MASK) * 500;
			}
		} else {
			tw_system = 0;
		}

		/* Get the Rx packet buffer size. */
		size = IGC_READ_REG(hw, IGC_RXPBS) &
		       IGC_RXPBS_SIZE_I225_MASK;

		/* Calculations vary based on DMAC settings. */
		if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
			size -= (IGC_READ_REG(hw, IGC_DMACR) &
				 IGC_DMACR_DMACTHR_MASK) >>
				IGC_DMACR_DMACTHR_SHIFT;
			/* Convert size to bits. */
			size *= 1024 * 8;
		} else {
			/* Convert size to bytes, subtract the MTU, and then
			 * convert the size to bits.
			 */
			size *= 1024;
			size -= hw->dev_spec._i225.mtu;
			size *= 8;
		}

		if (size < 0) {
			DEBUGOUT1("Invalid effective Rx buffer size %d\n",
				  size);
			return -IGC_ERR_CONFIG;
		}

		/* Calculate the thresholds. Since speed is in Mbps, simplify
		 * the calculation by multiplying size/speed by 1000 for result
		 * to be in nsec before dividing by the scale in nsec. Set the
		 * scale such that the LTR threshold fits in the register.
		 */
		ltr_min = (1000 * size) / speed;
		ltr_max = ltr_min + tw_system;
		scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
			    IGC_LTRMINV_SCALE_32768;
		scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
			    IGC_LTRMAXV_SCALE_32768;
		ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
		ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;

		/* Only write the LTR thresholds if they differ from before. */
		ltrv = IGC_READ_REG(hw, IGC_LTRMINV);
		if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
			ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
			       (scale_min << IGC_LTRMINV_SCALE_SHIFT);
			IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv);
		}

		ltrv = IGC_READ_REG(hw, IGC_LTRMAXV);
		if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
			ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
			       (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
			IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv);
		}
	}

	return IGC_SUCCESS;
}

/* igc_check_for_link_i225 - Check for link
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 */
s32 igc_check_for_link_i225(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link = false;

	DEBUGFUNC("igc_check_for_link_i225");

	/* We only want to go out to the PHY registers to see if
	 * Auto-Neg has completed and/or if our link status has
	 * changed. The get_link_status flag is set upon receiving
	 * a Link Status Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = IGC_SUCCESS;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igc_check_downshift_generic(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		goto out;

	/* Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igc_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");
out:
	/* Now that we are aware of our link settings, we can set the LTR
	 * thresholds.
	 */
	ret_val = igc_set_ltr_i225(hw, link);

	return ret_val;
}

/* igc_init_function_pointers_i225 - Init func ptrs.
 * @hw: pointer to the HW structure
 *
 * Called to initialize all function pointers and parameters.
 */
void igc_init_function_pointers_i225(struct igc_hw *hw)
{
	igc_init_mac_ops_generic(hw);
	igc_init_phy_ops_generic(hw);
	igc_init_nvm_ops_generic(hw);
	hw->mac.ops.init_params = igc_init_mac_params_i225;
	hw->nvm.ops.init_params = igc_init_nvm_params_i225;
	hw->phy.ops.init_params = igc_init_phy_params_i225;
}

/* igc_init_hw_i225 - Init hw for I225
 * @hw: pointer to the HW structure
 *
 * Called to initialize hw for i225 hw family.
 */
s32 igc_init_hw_i225(struct igc_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("igc_init_hw_i225");

	ret_val = igc_init_hw_base(hw);
	return ret_val;
}

/*
 * igc_set_d0_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Note: since I225 does not actually support LPLU, this function
 * simply enables/disables 1G and 2.5G speeds in D0.
 */
s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active)
{
	u32 data;

	DEBUGFUNC("igc_set_d0_lplu_state_i225");

	data = IGC_READ_REG(hw, IGC_I225_PHPM);

	if (active) {
		data |= IGC_I225_PHPM_DIS_1000;
		data |= IGC_I225_PHPM_DIS_2500;
	} else {
		data &= ~IGC_I225_PHPM_DIS_1000;
		data &= ~IGC_I225_PHPM_DIS_2500;
	}

	IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
	return IGC_SUCCESS;
}
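
/*
 * Illustrative effect (an inference from the PHPM bit names above, not a
 * measured result): with "active" set, both IGC_I225_PHPM_DIS_1000 and
 * IGC_I225_PHPM_DIS_2500 end up set in the PHPM register, so the link is
 * limited to 100 Mbps and below while the device is in D0.
 */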
1142 */ 1143 s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active) 1144 { 1145 u32 data; 1146 1147 DEBUGFUNC("igc_set_d3_lplu_state_i225"); 1148 1149 data = IGC_READ_REG(hw, IGC_I225_PHPM); 1150 1151 if (active) { 1152 data |= IGC_I225_PHPM_DIS_100_D3; 1153 data |= IGC_I225_PHPM_DIS_1000_D3; 1154 data |= IGC_I225_PHPM_DIS_2500_D3; 1155 } else { 1156 data &= ~IGC_I225_PHPM_DIS_100_D3; 1157 data &= ~IGC_I225_PHPM_DIS_1000_D3; 1158 data &= ~IGC_I225_PHPM_DIS_2500_D3; 1159 } 1160 1161 IGC_WRITE_REG(hw, IGC_I225_PHPM, data); 1162 return IGC_SUCCESS; 1163 } 1164 1165 /** 1166 * igc_set_eee_i225 - Enable/disable EEE support 1167 * @hw: pointer to the HW structure 1168 * @adv2p5G: boolean flag enabling 2.5G EEE advertisement 1169 * @adv1G: boolean flag enabling 1G EEE advertisement 1170 * @adv100M: boolean flag enabling 100M EEE advertisement 1171 * 1172 * Enable/disable EEE based on setting in dev_spec structure. 1173 * 1174 **/ 1175 s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, 1176 bool adv100M) 1177 { 1178 u32 ipcnfg, eeer; 1179 1180 DEBUGFUNC("igc_set_eee_i225"); 1181 1182 if (hw->mac.type != igc_i225 || 1183 hw->phy.media_type != igc_media_type_copper) 1184 goto out; 1185 ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG); 1186 eeer = IGC_READ_REG(hw, IGC_EEER); 1187 1188 /* enable or disable per user setting */ 1189 if (!(hw->dev_spec._i225.eee_disable)) { 1190 u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU); 1191 1192 if (adv100M) 1193 ipcnfg |= IGC_IPCNFG_EEE_100M_AN; 1194 else 1195 ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; 1196 1197 if (adv1G) 1198 ipcnfg |= IGC_IPCNFG_EEE_1G_AN; 1199 else 1200 ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; 1201 1202 if (adv2p5G) 1203 ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; 1204 else 1205 ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; 1206 1207 eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | 1208 IGC_EEER_LPI_FC); 1209 1210 /* This bit should not be set in normal operation. */ 1211 if (eee_su & IGC_EEE_SU_LPI_CLK_STP) 1212 DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); 1213 } else { 1214 ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | 1215 IGC_IPCNFG_EEE_100M_AN); 1216 eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | 1217 IGC_EEER_LPI_FC); 1218 } 1219 IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg); 1220 IGC_WRITE_REG(hw, IGC_EEER, eeer); 1221 IGC_READ_REG(hw, IGC_IPCNFG); 1222 IGC_READ_REG(hw, IGC_EEER); 1223 out: 1224 1225 return IGC_SUCCESS; 1226 } 1227 1228