xref: /freebsd/sys/dev/e1000/e1000_i210.c (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71)
/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "e1000_api.h"


static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_i210");

	ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word read from the Shadow Ram
 *
 *  Reads a 16 bit word from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
			     u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_read_nvm_srrd_i210");

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure.  However, it is more efficient
	 * to read in bursts than to synchronize access for each word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_read_nvm_eerd(hw, offset, count,
						     data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and the Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If an error code is returned, data and Shadow RAM may be inconsistent -
 *  the buffer may have been only partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_write_nvm_srwr_i210");

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure.  However, it is more efficient
	 * to write in bursts than to synchronize access for each word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

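	/* Each word is written through the SRWR register: the destination
	 * word offset goes in the address field, the data goes in the data
	 * field, and the START bit kicks off the write.  Completion is
	 * detected by polling the DONE bit below. */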
	for (i = 0; i < words; i++) {
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads a 16-bit word from the OTP.  Returns an error when the word is not
 *  stored in OTP.
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

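	/* Walk the iNVM (OTP) dword by dword.  Each dword starts a typed
	 * record; CSR-autoload and RSA-key records are skipped by advancing
	 * the index past their payload, while word-autoload records are
	 * checked against the requested word address. */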
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

/**
 *  e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word address (aka eeprom offset) to read
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
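	/* The PCI identification words are not stored in the iNVM; return
	 * the values already cached in the hw structure instead. */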
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Replace the read function that grabs the semaphore with
		 * one that skips it, since the semaphore is already held
		 * at this point.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  The EEPROM data is then committed to the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

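		/* Sum every word below the checksum word, then store the
		 * value that makes the whole image add up to NVM_SUM
		 * (0xBABA) in the checksum word itself. */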
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 *  e1000_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = FALSE;

	DEBUGFUNC("e1000_get_flash_presence_i210");

	eec = E1000_READ_REG(hw, E1000_EECD);

	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = TRUE;

	return ret_val;
}

/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 flup;

	DEBUGFUNC("e1000_update_flash_i210");

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		DEBUGOUT("Flash update time out\n");
		goto out;
	}

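	/* Setting the FLUPD bit in EECD starts the flash update; completion
	 * is signalled by the FLUDONE bit polled below. */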
	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
	E1000_WRITE_REG(hw, E1000_EECD, flup);

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update time out\n");

out:
	return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("e1000_pool_flash_update_done_i210");

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = E1000_READ_REG(hw, E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = E1000_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
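	/* Parts with an external flash use the Shadow RAM read/write paths;
	 * flash-less parts (e.g. i211) fall back to the iNVM (OTP) reader
	 * and stub out the write/validate/update operations. */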
	if (e1000_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read    = e1000_read_nvm_srrd_i210;
		nvm->ops.write   = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update   = e1000_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read     = e1000_read_invm_i210;
		nvm->ops.write    = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update   = e1000_null_ops_generic;
	}
	return ret_val;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);
	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;

	return;
}

/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the returned LED configuration word
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_i210");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  __e1000_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				    u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_xmdio_reg");

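	/* Indirect MMD access sequence: select the MMD device in the access
	 * control register, write the register address, switch the control
	 * register to the data function, then read or write the data. */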
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
							 dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 *  e1000_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the XMDIO address
 **/
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	DEBUGFUNC("e1000_read_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, TRUE);
}

/**
 *  e1000_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	DEBUGFUNC("e1000_write_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE);
}

/**
 * e1000_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an erratum in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 **/
static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
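	/* Temporarily clear the external-MDIO bit so the internal PHY is
	 * addressed directly; the original MDICNFG value is restored before
	 * returning. */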
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
					 E1000_PHY_PLL_FREQ_REG), &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		E1000_WRITE_REG(hw, E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

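		/* As part of the workaround, bounce the device through D3hot
		 * and back to D0 via the PMCSR power-state field so the
		 * autoload value written above takes effect. */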
		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	return ret_val;
}

/**
 *  e1000_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register and check the config done bit for
 *  completion status.  NOTE: EEPROM-less silicon will fail trying to read
 *  the config done bit, so the error is *ONLY* logged and E1000_SUCCESS is
 *  returned.  If we returned an error, EEPROM-less silicon would not be
 *  able to be reset or to change link.
 **/
static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_i210");

	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout)
		DEBUGOUT("MNG configuration cycle has not completed.\n");

	return E1000_SUCCESS;
}

/**
 *  e1000_init_hw_i210 - Init hw for I210/I211
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize hw for i210 hw family.
 **/
s32 e1000_init_hw_i210(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;

	DEBUGFUNC("e1000_init_hw_i210");
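	/* The PLL workaround is only needed on flash-less (iNVM) parts. */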
	if ((hw->mac.type >= e1000_i210) &&
	    !(e1000_get_flash_presence_i210(hw))) {
		ret_val = e1000_pll_workaround_i210(hw);
		if (ret_val != E1000_SUCCESS)
			return ret_val;
	}
	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;

	/* Initialize identification LED */
	mac->ops.id_led_init(hw);

	ret_val = e1000_init_hw_82575(hw);
	return ret_val;
}
795