1 /****************************************************************************** 2 3 Copyright (c) 2001-2012, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 #include "e1000_api.h" 36 37 38 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); 39 static void e1000_release_nvm_i210(struct e1000_hw *hw); 40 static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); 41 static void e1000_put_hw_semaphore_i210(struct e1000_hw *hw); 42 static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, 43 u16 *data); 44 static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); 45 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 46 static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, 47 u16 *data); 48 49 /** 50 * e1000_acquire_nvm_i210 - Request for access to EEPROM 51 * @hw: pointer to the HW structure 52 * 53 * Acquire the necessary semaphores for exclusive access to the EEPROM. 54 * Set the EEPROM access request bit and wait for EEPROM access grant bit. 55 * Return successful if access grant bit set, else clear the request for 56 * EEPROM access and return -E1000_ERR_NVM (-1). 57 **/ 58 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) 59 { 60 s32 ret_val; 61 62 DEBUGFUNC("e1000_acquire_nvm_i210"); 63 64 ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); 65 66 return ret_val; 67 } 68 69 /** 70 * e1000_release_nvm_i210 - Release exclusive access to EEPROM 71 * @hw: pointer to the HW structure 72 * 73 * Stop any current commands to the EEPROM and clear the EEPROM request bit, 74 * then release the semaphores acquired. 75 **/ 76 static void e1000_release_nvm_i210(struct e1000_hw *hw) 77 { 78 DEBUGFUNC("e1000_release_nvm_i210"); 79 80 e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); 81 } 82 83 /** 84 * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore 85 * @hw: pointer to the HW structure 86 * @mask: specifies which semaphore to acquire 87 * 88 * Acquire the SW/FW semaphore to access the PHY or NVM. 
The mask 89 * will also specify which port we're acquiring the lock for. 90 **/ 91 s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) 92 { 93 u32 swfw_sync; 94 u32 swmask = mask; 95 u32 fwmask = mask << 16; 96 s32 ret_val = E1000_SUCCESS; 97 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ 98 99 DEBUGFUNC("e1000_acquire_swfw_sync_i210"); 100 101 while (i < timeout) { 102 if (e1000_get_hw_semaphore_i210(hw)) { 103 ret_val = -E1000_ERR_SWFW_SYNC; 104 goto out; 105 } 106 107 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); 108 if (!(swfw_sync & fwmask)) 109 break; 110 111 /* 112 * Firmware currently using resource (fwmask) 113 */ 114 e1000_put_hw_semaphore_i210(hw); 115 msec_delay_irq(5); 116 i++; 117 } 118 119 if (i == timeout) { 120 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); 121 ret_val = -E1000_ERR_SWFW_SYNC; 122 goto out; 123 } 124 125 swfw_sync |= swmask; 126 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); 127 128 e1000_put_hw_semaphore_i210(hw); 129 130 out: 131 return ret_val; 132 } 133 134 /** 135 * e1000_release_swfw_sync_i210 - Release SW/FW semaphore 136 * @hw: pointer to the HW structure 137 * @mask: specifies which semaphore to acquire 138 * 139 * Release the SW/FW semaphore used to access the PHY or NVM. The mask 140 * will also specify which port we're releasing the lock for. 
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	/* NOTE(review): spins indefinitely until the HW semaphore is
	 * obtained - release is not allowed to fail. */
	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	/* Clear only the caller's bit(s) in SW_FW_SYNC. */
	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_i210(hw);
}

/**
 * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM.
 * Returns E1000_SUCCESS on success, or -E1000_ERR_NVM if the SWESMBI bit
 * could not be latched before the timeout expires.
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = E1000_SUCCESS;
	/* One attempt per NVM word plus one; presumably sized so firmware
	 * can finish a full NVM pass - TODO confirm against the datasheet. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		/* Request ownership by setting SWESMBI. */
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_put_hw_semaphore_i210 - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM by clearing
 * the SWESMBI bit in the SWSM register.
 **/
static void e1000_put_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("e1000_put_hw_semaphore_i210");

	swsm = E1000_READ_REG(hw, E1000_SWSM);

	swsm &= ~E1000_SWSM_SWESMBI;

	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}

/**
 * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of
word in the Shadow Ram to read 220 * @words: number of words to read 221 * @data: word read from the Shadow Ram 222 * 223 * Reads a 16 bit word from the Shadow Ram using the EERD register. 224 * Uses necessary synchronization semaphores. 225 **/ 226 s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, 227 u16 *data) 228 { 229 s32 status = E1000_SUCCESS; 230 u16 i, count; 231 232 DEBUGFUNC("e1000_read_nvm_srrd_i210"); 233 234 /* We cannot hold synchronization semaphores for too long, 235 * because of forceful takeover procedure. However it is more efficient 236 * to read in bursts than synchronizing access for each word. */ 237 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { 238 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 239 E1000_EERD_EEWR_MAX_COUNT : (words - i); 240 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 241 status = e1000_read_nvm_eerd(hw, offset, count, 242 data + i); 243 hw->nvm.ops.release(hw); 244 } else { 245 status = E1000_ERR_SWFW_SYNC; 246 } 247 248 if (status != E1000_SUCCESS) 249 break; 250 } 251 252 return status; 253 } 254 255 /** 256 * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR 257 * @hw: pointer to the HW structure 258 * @offset: offset within the Shadow RAM to be written to 259 * @words: number of words to write 260 * @data: 16 bit word(s) to be written to the Shadow RAM 261 * 262 * Writes data to Shadow RAM at offset using EEWR register. 263 * 264 * If e1000_update_nvm_checksum is not called after this function , the 265 * data will not be committed to FLASH and also Shadow RAM will most likely 266 * contain an invalid checksum. 267 * 268 * If error code is returned, data and Shadow RAM may be inconsistent - buffer 269 * partially written. 
270 **/ 271 s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, 272 u16 *data) 273 { 274 s32 status = E1000_SUCCESS; 275 u16 i, count; 276 277 DEBUGFUNC("e1000_write_nvm_srwr_i210"); 278 279 /* We cannot hold synchronization semaphores for too long, 280 * because of forceful takeover procedure. However it is more efficient 281 * to write in bursts than synchronizing access for each word. */ 282 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { 283 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 284 E1000_EERD_EEWR_MAX_COUNT : (words - i); 285 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 286 status = e1000_write_nvm_srwr(hw, offset, count, 287 data + i); 288 hw->nvm.ops.release(hw); 289 } else { 290 status = E1000_ERR_SWFW_SYNC; 291 } 292 293 if (status != E1000_SUCCESS) 294 break; 295 } 296 297 return status; 298 } 299 300 /** 301 * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR 302 * @hw: pointer to the HW structure 303 * @offset: offset within the Shadow Ram to be written to 304 * @words: number of words to write 305 * @data: 16 bit word(s) to be written to the Shadow Ram 306 * 307 * Writes data to Shadow Ram at offset using EEWR register. 308 * 309 * If e1000_update_nvm_checksum is not called after this function , the 310 * Shadow Ram will most likely contain an invalid checksum. 311 **/ 312 static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, 313 u16 *data) 314 { 315 struct e1000_nvm_info *nvm = &hw->nvm; 316 u32 i, k, eewr = 0; 317 u32 attempts = 100000; 318 s32 ret_val = E1000_SUCCESS; 319 320 DEBUGFUNC("e1000_write_nvm_srwr"); 321 322 /* 323 * A check for invalid values: offset too large, too many words, 324 * too many words for the offset, and not enough words. 
325 */ 326 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 327 (words == 0)) { 328 DEBUGOUT("nvm parameter(s) out of bounds\n"); 329 ret_val = -E1000_ERR_NVM; 330 goto out; 331 } 332 333 for (i = 0; i < words; i++) { 334 eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | 335 (data[i] << E1000_NVM_RW_REG_DATA) | 336 E1000_NVM_RW_REG_START; 337 338 E1000_WRITE_REG(hw, E1000_SRWR, eewr); 339 340 for (k = 0; k < attempts; k++) { 341 if (E1000_NVM_RW_REG_DONE & 342 E1000_READ_REG(hw, E1000_SRWR)) { 343 ret_val = E1000_SUCCESS; 344 break; 345 } 346 usec_delay(5); 347 } 348 349 if (ret_val != E1000_SUCCESS) { 350 DEBUGOUT("Shadow RAM write EEWR timed out\n"); 351 break; 352 } 353 } 354 355 out: 356 return ret_val; 357 } 358 359 /** 360 * e1000_read_nvm_i211 - Read NVM wrapper function for I211 361 * @hw: pointer to the HW structure 362 * @address: the word address (aka eeprom offset) to read 363 * @data: pointer to the data read 364 * 365 * Wrapper function to return data formerly found in the NVM. 
 **/
static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
			       u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_nvm_i211");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* Three consecutive iNVM words; OR-ing the results makes the
		 * lookup fail if any single word is missing.
		 * NOTE(review): `words` is not checked here - assumes the
		 * caller requested at least 3 words; confirm. */
		ret_val = e1000_read_invm_i211(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+1, &data[1]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+2, &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_ID_LED_SETTINGS:
	case NVM_INIT_CTRL_2:
	case NVM_INIT_CTRL_4:
	case NVM_LED_1_CFG:
	case NVM_LED_0_2_CFG:
		/* NOTE(review): return value is discarded - if the word is
		 * absent from the iNVM, *data is left unmodified while
		 * E1000_SUCCESS is still returned; verify this is intended. */
		e1000_read_invm_i211(hw, (u8)offset, data);
		break;
	case NVM_COMPAT:
		/* Words with no iNVM backing report fixed defaults. */
		*data = ID_LED_DEFAULT_I210;
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 * e1000_read_invm_i211 - Reads OTP
 * @hw: pointer to the HW structure
 * @address: the word address (aka eeprom offset) to read
 * @data: pointer to the data read
 *
 * Reads 16-bit words from the OTP. Return error when the word is not
 * stored in OTP.
421 **/ 422 s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data) 423 { 424 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; 425 u32 invm_dword; 426 u16 i; 427 u8 record_type, word_address; 428 429 DEBUGFUNC("e1000_read_invm_i211"); 430 431 for (i = 0; i < E1000_INVM_SIZE; i++) { 432 invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); 433 /* Get record type */ 434 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); 435 if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) 436 break; 437 if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) 438 i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; 439 if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) 440 i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; 441 if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { 442 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); 443 if (word_address == address) { 444 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 445 DEBUGOUT2("Read INVM Word 0x%02x = %x", 446 address, *data); 447 status = E1000_SUCCESS; 448 break; 449 } 450 } 451 } 452 if (status != E1000_SUCCESS) 453 DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); 454 return status; 455 } 456 457 /** 458 * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum 459 * @hw: pointer to the HW structure 460 * 461 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 462 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 463 **/ 464 s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) 465 { 466 s32 status = E1000_SUCCESS; 467 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); 468 469 DEBUGFUNC("e1000_validate_nvm_checksum_i210"); 470 471 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 472 473 /* 474 * Replace the read function with semaphore grabbing with 475 * the one that skips this for a while. 476 * We have semaphore taken already here. 
 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}


/**
 * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.  Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
527 */ 528 529 for (i = 0; i < NVM_CHECKSUM_REG; i++) { 530 ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); 531 if (ret_val) { 532 hw->nvm.ops.release(hw); 533 DEBUGOUT("NVM Read Error while updating checksum.\n"); 534 goto out; 535 } 536 checksum += nvm_data; 537 } 538 checksum = (u16) NVM_SUM - checksum; 539 ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, 540 &checksum); 541 if (ret_val != E1000_SUCCESS) { 542 hw->nvm.ops.release(hw); 543 DEBUGOUT("NVM Write Error while updating checksum.\n"); 544 goto out; 545 } 546 547 hw->nvm.ops.release(hw); 548 549 ret_val = e1000_update_flash_i210(hw); 550 } else { 551 ret_val = E1000_ERR_SWFW_SYNC; 552 } 553 out: 554 return ret_val; 555 } 556 557 /** 558 * e1000_get_flash_presence_i210 - Check if flash device is detected. 559 * @hw: pointer to the HW structure 560 * 561 **/ 562 static bool e1000_get_flash_presence_i210(struct e1000_hw *hw) 563 { 564 u32 eec = 0; 565 bool ret_val = FALSE; 566 567 DEBUGFUNC("e1000_get_flash_presence_i210"); 568 569 eec = E1000_READ_REG(hw, E1000_EECD); 570 571 if (eec & E1000_EECD_FLASH_DETECTED_I210) 572 ret_val = TRUE; 573 574 return ret_val; 575 } 576 577 /** 578 * e1000_update_flash_i210 - Commit EEPROM to the flash 579 * @hw: pointer to the HW structure 580 * 581 **/ 582 s32 e1000_update_flash_i210(struct e1000_hw *hw) 583 { 584 s32 ret_val = E1000_SUCCESS; 585 u32 flup; 586 587 DEBUGFUNC("e1000_update_flash_i210"); 588 589 ret_val = e1000_pool_flash_update_done_i210(hw); 590 if (ret_val == -E1000_ERR_NVM) { 591 DEBUGOUT("Flash update time out\n"); 592 goto out; 593 } 594 595 flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; 596 E1000_WRITE_REG(hw, E1000_EECD, flup); 597 598 ret_val = e1000_pool_flash_update_done_i210(hw); 599 if (ret_val == E1000_SUCCESS) 600 DEBUGOUT("Flash update complete\n"); 601 else 602 DEBUGOUT("Flash update time out\n"); 603 604 out: 605 return ret_val; 606 } 607 608 /** 609 * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. 
610 * @hw: pointer to the HW structure 611 * 612 **/ 613 s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) 614 { 615 s32 ret_val = -E1000_ERR_NVM; 616 u32 i, reg; 617 618 DEBUGFUNC("e1000_pool_flash_update_done_i210"); 619 620 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { 621 reg = E1000_READ_REG(hw, E1000_EECD); 622 if (reg & E1000_EECD_FLUDONE_I210) { 623 ret_val = E1000_SUCCESS; 624 break; 625 } 626 usec_delay(5); 627 } 628 629 return ret_val; 630 } 631 632 /** 633 * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers 634 * @hw: pointer to the HW structure 635 * 636 * Initialize the i210 NVM parameters and function pointers. 637 **/ 638 static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) 639 { 640 s32 ret_val = E1000_SUCCESS; 641 struct e1000_nvm_info *nvm = &hw->nvm; 642 643 DEBUGFUNC("e1000_init_nvm_params_i210"); 644 645 ret_val = e1000_init_nvm_params_82575(hw); 646 647 nvm->ops.acquire = e1000_acquire_nvm_i210; 648 nvm->ops.release = e1000_release_nvm_i210; 649 nvm->ops.read = e1000_read_nvm_srrd_i210; 650 nvm->ops.write = e1000_write_nvm_srwr_i210; 651 nvm->ops.valid_led_default = e1000_valid_led_default_i210; 652 nvm->ops.validate = e1000_validate_nvm_checksum_i210; 653 nvm->ops.update = e1000_update_nvm_checksum_i210; 654 655 return ret_val; 656 } 657 658 /** 659 * e1000_init_nvm_params_i211 - Initialize i211 NVM function pointers 660 * @hw: pointer to the HW structure 661 * 662 * Initialize the NVM parameters and function pointers for i211. 
663 **/ 664 static s32 e1000_init_nvm_params_i211(struct e1000_hw *hw) 665 { 666 struct e1000_nvm_info *nvm = &hw->nvm; 667 668 DEBUGFUNC("e1000_init_nvm_params_i211"); 669 670 nvm->ops.acquire = e1000_acquire_nvm_i210; 671 nvm->ops.release = e1000_release_nvm_i210; 672 nvm->ops.read = e1000_read_nvm_i211; 673 nvm->ops.valid_led_default = e1000_valid_led_default_i210; 674 nvm->ops.write = e1000_null_write_nvm; 675 nvm->ops.validate = e1000_null_ops_generic; 676 nvm->ops.update = e1000_null_ops_generic; 677 678 return E1000_SUCCESS; 679 } 680 681 /** 682 * e1000_init_function_pointers_i210 - Init func ptrs. 683 * @hw: pointer to the HW structure 684 * 685 * Called to initialize all function pointers and parameters. 686 **/ 687 void e1000_init_function_pointers_i210(struct e1000_hw *hw) 688 { 689 e1000_init_function_pointers_82575(hw); 690 691 switch (hw->mac.type) { 692 case e1000_i210: 693 if (e1000_get_flash_presence_i210(hw)) 694 hw->nvm.ops.init_params = e1000_init_nvm_params_i210; 695 else 696 hw->nvm.ops.init_params = e1000_init_nvm_params_i211; 697 break; 698 case e1000_i211: 699 hw->nvm.ops.init_params = e1000_init_nvm_params_i211; 700 break; 701 default: 702 break; 703 } 704 return; 705 } 706 707 /** 708 * e1000_valid_led_default_i210 - Verify a valid default LED config 709 * @hw: pointer to the HW structure 710 * @data: pointer to the NVM (EEPROM) 711 * 712 * Read the EEPROM for the current default LED configuration. If the 713 * LED configuration is not valid, set to a valid LED configuration. 
714 **/ 715 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) 716 { 717 s32 ret_val; 718 719 DEBUGFUNC("e1000_valid_led_default_i210"); 720 721 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); 722 if (ret_val) { 723 DEBUGOUT("NVM Read Error\n"); 724 goto out; 725 } 726 727 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { 728 switch (hw->phy.media_type) { 729 case e1000_media_type_internal_serdes: 730 *data = ID_LED_DEFAULT_I210_SERDES; 731 break; 732 case e1000_media_type_copper: 733 default: 734 *data = ID_LED_DEFAULT_I210; 735 break; 736 } 737 } 738 out: 739 return ret_val; 740 } 741