xref: /freebsd/sys/dev/e1000/e1000_i210.c (revision ddfec1fb6814088abc5805f45c4a18c5731d51b9)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2001-2020, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 
35 #include "e1000_api.h"
36 
37 
38 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
39 static void e1000_release_nvm_i210(struct e1000_hw *hw);
40 static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
41 				u16 *data);
42 static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
43 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
44 
45 /**
46  *  e1000_acquire_nvm_i210 - Request for access to EEPROM
47  *  @hw: pointer to the HW structure
48  *
49  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
50  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
51  *  Return successful if access grant bit set, else clear the request for
52  *  EEPROM access and return -E1000_ERR_NVM (-1).
53  **/
e1000_acquire_nvm_i210(struct e1000_hw * hw)54 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
55 {
56 	s32 ret_val;
57 
58 	DEBUGFUNC("e1000_acquire_nvm_i210");
59 
60 	ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
61 
62 	return ret_val;
63 }
64 
/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	/* Counterpart of e1000_acquire_nvm_i210: drop the SW/FW semaphore. */
	e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}
78 
79 /**
80  *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
81  *  @hw: pointer to the HW structure
82  *  @offset: offset of word in the Shadow Ram to read
83  *  @words: number of words to read
84  *  @data: word read from the Shadow Ram
85  *
86  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
87  *  Uses necessary synchronization semaphores.
88  **/
e1000_read_nvm_srrd_i210(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)89 s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
90 			     u16 *data)
91 {
92 	s32 status = E1000_SUCCESS;
93 	u16 i, count;
94 
95 	DEBUGFUNC("e1000_read_nvm_srrd_i210");
96 
97 	/* We cannot hold synchronization semaphores for too long,
98 	 * because of forceful takeover procedure. However it is more efficient
99 	 * to read in bursts than synchronizing access for each word. */
100 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
101 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
102 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
103 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
104 			status = e1000_read_nvm_eerd(hw, offset, count,
105 						     data + i);
106 			hw->nvm.ops.release(hw);
107 		} else {
108 			status = E1000_ERR_SWFW_SYNC;
109 		}
110 
111 		if (status != E1000_SUCCESS)
112 			break;
113 	}
114 
115 	return status;
116 }
117 
118 /**
119  *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
120  *  @hw: pointer to the HW structure
121  *  @offset: offset within the Shadow RAM to be written to
122  *  @words: number of words to write
123  *  @data: 16 bit word(s) to be written to the Shadow RAM
124  *
125  *  Writes data to Shadow RAM at offset using EEWR register.
126  *
127  *  If e1000_update_nvm_checksum is not called after this function , the
128  *  data will not be committed to FLASH and also Shadow RAM will most likely
129  *  contain an invalid checksum.
130  *
131  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
132  *  partially written.
133  **/
e1000_write_nvm_srwr_i210(struct e1000_hw * hw,u16 offset,u16 words,u16 * data)134 s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
135 			      u16 *data)
136 {
137 	s32 status = E1000_SUCCESS;
138 	u16 i, count;
139 
140 	DEBUGFUNC("e1000_write_nvm_srwr_i210");
141 
142 	/* We cannot hold synchronization semaphores for too long,
143 	 * because of forceful takeover procedure. However it is more efficient
144 	 * to write in bursts than synchronizing access for each word. */
145 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
146 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
147 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
148 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
149 			status = e1000_write_nvm_srwr(hw, offset, count,
150 						      data + i);
151 			hw->nvm.ops.release(hw);
152 		} else {
153 			status = E1000_ERR_SWFW_SYNC;
154 		}
155 
156 		if (status != E1000_SUCCESS)
157 			break;
158 	}
159 
160 	return status;
161 }
162 
/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register, one word at a
 *  time, polling SRWR for completion after each word.  Caller must already
 *  hold the NVM semaphore.
 *
 *  If e1000_update_nvm_checksum is not called after this function , the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;	/* 100000 polls * 5 us = up to ~500 ms/word */
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Assume failure until the DONE bit is observed below. */
		ret_val = -E1000_ERR_NVM;

		/* Pack word address, data and the START command into SRWR. */
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		/* Poll until hardware reports the write is complete. */
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
223 
/** e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	/* Walk the iNVM record stream; each dword begins with a record type. */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		/* An uninitialized record marks the end of programmed data. */
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Skip the payload dwords of multi-dword record types. */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			/* Word-autoload records carry one EEPROM-style word;
			 * return it when its address matches the request. */
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
266 
/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom offset) to read
 *  @words: number of words to read (unused; each case reads a fixed field)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 *  Words missing from the iNVM are substituted with documented defaults
 *  or with identifiers cached in the hw structure.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* MAC address occupies three consecutive 16-bit words;
		 * any read failure leaves ret_val non-zero. */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		/* Fall back to the I211 default when not programmed. */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_SUB_DEV_ID:
		/* Identification words are served from values already cached
		 * in the hw structure rather than from the iNVM. */
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
346 
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.  Scans the version region of the
 *  iNVM (the dwords below the ULT area) from the highest dword downward
 *  to find the last version/image-type field actually programmed.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of dwords that can hold version info: total iNVM size
	 * minus the ULT region at the top. */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 nvm_version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory into a local snapshot */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number: walk dwords from high to low; each dword
	 * packs two version fields (FIELD_ONE at bit 3, FIELD_TWO at
	 * bit 13 — per the shift amounts used below). */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			nvm_version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			nvm_version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			nvm_version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			nvm_version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	/* Split the packed version word into major/minor. */
	if (status == E1000_SUCCESS) {
		invm_ver->invm_major = (nvm_version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = nvm_version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type using the same high-to-low scan */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in the last location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
445 
446 /**
447  *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
448  *  @hw: pointer to the HW structure
449  *
450  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
451  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
452  **/
e1000_validate_nvm_checksum_i210(struct e1000_hw * hw)453 s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
454 {
455 	s32 status = E1000_SUCCESS;
456 	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
457 
458 	DEBUGFUNC("e1000_validate_nvm_checksum_i210");
459 
460 	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
461 
462 		/*
463 		 * Replace the read function with semaphore grabbing with
464 		 * the one that skips this for a while.
465 		 * We have semaphore taken already here.
466 		 */
467 		read_op_ptr = hw->nvm.ops.read;
468 		hw->nvm.ops.read = e1000_read_nvm_eerd;
469 
470 		status = e1000_validate_nvm_checksum_generic(hw);
471 
472 		/* Revert original read operation. */
473 		hw->nvm.ops.read = read_op_ptr;
474 
475 		hw->nvm.ops.release(hw);
476 	} else {
477 		status = E1000_ERR_SWFW_SYNC;
478 	}
479 
480 	return status;
481 }
482 
483 
484 /**
485  *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
486  *  @hw: pointer to the HW structure
487  *
488  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
489  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
490  *  value to the EEPROM. Next commit EEPROM data onto the Flash.
491  **/
e1000_update_nvm_checksum_i210(struct e1000_hw * hw)492 s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
493 {
494 	s32 ret_val;
495 	u16 checksum = 0;
496 	u16 i, nvm_data;
497 
498 	DEBUGFUNC("e1000_update_nvm_checksum_i210");
499 
500 	/*
501 	 * Read the first word from the EEPROM. If this times out or fails, do
502 	 * not continue or we could be in for a very long wait while every
503 	 * EEPROM read fails
504 	 */
505 	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
506 	if (ret_val != E1000_SUCCESS) {
507 		DEBUGOUT("EEPROM read failed\n");
508 		goto out;
509 	}
510 
511 	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
512 		/*
513 		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
514 		 * because we do not want to take the synchronization
515 		 * semaphores twice here.
516 		 */
517 
518 		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
519 			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
520 			if (ret_val) {
521 				hw->nvm.ops.release(hw);
522 				DEBUGOUT("NVM Read Error while updating checksum.\n");
523 				goto out;
524 			}
525 			checksum += nvm_data;
526 		}
527 		checksum = (u16) NVM_SUM - checksum;
528 		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
529 						&checksum);
530 		if (ret_val != E1000_SUCCESS) {
531 			hw->nvm.ops.release(hw);
532 			DEBUGOUT("NVM Write Error while updating checksum.\n");
533 			goto out;
534 		}
535 
536 		hw->nvm.ops.release(hw);
537 
538 		ret_val = e1000_update_flash_i210(hw);
539 	} else {
540 		ret_val = E1000_ERR_SWFW_SYNC;
541 	}
542 out:
543 	return ret_val;
544 }
545 
546 /**
547  *  e1000_get_flash_presence_i210 - Check if flash device is detected.
548  *  @hw: pointer to the HW structure
549  *
550  **/
e1000_get_flash_presence_i210(struct e1000_hw * hw)551 bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
552 {
553 	u32 eec = 0;
554 	bool ret_val = false;
555 
556 	DEBUGFUNC("e1000_get_flash_presence_i210");
557 
558 	eec = E1000_READ_REG(hw, E1000_EECD);
559 
560 	if (eec & E1000_EECD_FLASH_DETECTED_I210)
561 		ret_val = true;
562 
563 	return ret_val;
564 }
565 
566 /**
567  *  e1000_update_flash_i210 - Commit EEPROM to the flash
568  *  @hw: pointer to the HW structure
569  *
570  **/
e1000_update_flash_i210(struct e1000_hw * hw)571 s32 e1000_update_flash_i210(struct e1000_hw *hw)
572 {
573 	s32 ret_val;
574 	u32 flup;
575 
576 	DEBUGFUNC("e1000_update_flash_i210");
577 
578 	ret_val = e1000_pool_flash_update_done_i210(hw);
579 	if (ret_val == -E1000_ERR_NVM) {
580 		DEBUGOUT("Flash update time out\n");
581 		goto out;
582 	}
583 
584 	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
585 	E1000_WRITE_REG(hw, E1000_EECD, flup);
586 
587 	ret_val = e1000_pool_flash_update_done_i210(hw);
588 	if (ret_val == E1000_SUCCESS)
589 		DEBUGOUT("Flash update complete\n");
590 	else
591 		DEBUGOUT("Flash update time out\n");
592 
593 out:
594 	return ret_val;
595 }
596 
597 /**
598  *  e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
599  *  @hw: pointer to the HW structure
600  *
601  **/
e1000_pool_flash_update_done_i210(struct e1000_hw * hw)602 s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
603 {
604 	s32 ret_val = -E1000_ERR_NVM;
605 	u32 i, reg;
606 
607 	DEBUGFUNC("e1000_pool_flash_update_done_i210");
608 
609 	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
610 		reg = E1000_READ_REG(hw, E1000_EECD);
611 		if (reg & E1000_EECD_FLUDONE_I210) {
612 			ret_val = E1000_SUCCESS;
613 			break;
614 		}
615 		usec_delay(5);
616 	}
617 
618 	return ret_val;
619 }
620 
/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 *  Starts from the 82575 defaults, then overrides the ops depending on
 *  whether an external flash part is present.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	/* Inherit base parameters from the 82575 family setup. */
	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	if (e1000_get_flash_presence_i210(hw)) {
		/* Flash-backed part: full read/write/checksum support. */
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read    = e1000_read_nvm_srrd_i210;
		nvm->ops.write   = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update   = e1000_update_nvm_checksum_i210;
	} else {
		/* Flash-less part: serve reads from iNVM (OTP); writes,
		 * validate and update become no-ops. */
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read     = e1000_read_invm_i210;
		nvm->ops.write    = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update   = e1000_null_ops_generic;
	}
	return ret_val;
}
653 
/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 *  Reuses the 82575 family pointers, then installs the i210-specific
 *  NVM parameter initializer.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);
	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
}
665 
666 /**
667  *  e1000_valid_led_default_i210 - Verify a valid default LED config
668  *  @hw: pointer to the HW structure
669  *  @data: pointer to the NVM (EEPROM)
670  *
671  *  Read the EEPROM for the current default LED configuration.  If the
672  *  LED configuration is not valid, set to a valid LED configuration.
673  **/
e1000_valid_led_default_i210(struct e1000_hw * hw,u16 * data)674 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
675 {
676 	s32 ret_val;
677 
678 	DEBUGFUNC("e1000_valid_led_default_i210");
679 
680 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
681 	if (ret_val) {
682 		DEBUGOUT("NVM Read Error\n");
683 		goto out;
684 	}
685 
686 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
687 		switch (hw->phy.media_type) {
688 		case e1000_media_type_internal_serdes:
689 			*data = ID_LED_DEFAULT_I210_SERDES;
690 			break;
691 		case e1000_media_type_copper:
692 		default:
693 			*data = ID_LED_DEFAULT_I210;
694 			break;
695 		}
696 	}
697 out:
698 	return ret_val;
699 }
700 
/**
 * e1000_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.  Repeatedly resets
 * the internal PHY and power-cycles it through D3 until the PHY PLL
 * frequency register no longer reads as unconfigured, up to
 * E1000_MAX_PLL_TRIES attempts.  Returns -E1000_ERR_PHY if every
 * attempt fails.
 **/
static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get PHY semaphore */
	hw->phy.ops.acquire(hw);
	/* Save registers this sequence clobbers, and force internal MDIO
	 * so the PHY accesses below reach the internal PHY. */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0xFC);
		usec_delay(20);
		e1000_read_phy_reg_mdic(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		usec_delay(20);
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0);
		/* PLL is good once the unconfigured pattern clears. */
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		/* Stage the workaround autoload value for the power cycle. */
		E1000_WRITE_REG(hw, E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* Bounce the device through D3 and back to D0 to force a
		 * reload with the staged value. */
		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		/* Restore the original autoload word. */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	/* Release PHY semaphore */
	hw->phy.ops.release(hw);
	return ret_val;
}
774 
775 /**
776  *  e1000_get_cfg_done_i210 - Read config done bit
777  *  @hw: pointer to the HW structure
778  *
779  *  Read the management control register for the config done bit for
780  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
781  *  to read the config done bit, so an error is *ONLY* logged and returns
782  *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
783  *  would not be able to be reset or change link.
784  **/
e1000_get_cfg_done_i210(struct e1000_hw * hw)785 static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
786 {
787 	s32 timeout = PHY_CFG_TIMEOUT;
788 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
789 
790 	DEBUGFUNC("e1000_get_cfg_done_i210");
791 
792 	while (timeout) {
793 		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
794 			break;
795 		msec_delay(1);
796 		timeout--;
797 	}
798 	if (!timeout)
799 		DEBUGOUT("MNG configuration cycle has not completed.\n");
800 
801 	return E1000_SUCCESS;
802 }
803 
804 /**
805  *  e1000_init_hw_i210 - Init hw for I210/I211
806  *  @hw: pointer to the HW structure
807  *
808  *  Called to initialize hw for i210 hw family.
809  **/
e1000_init_hw_i210(struct e1000_hw * hw)810 s32 e1000_init_hw_i210(struct e1000_hw *hw)
811 {
812 	struct e1000_mac_info *mac = &hw->mac;
813 	s32 ret_val;
814 
815 	DEBUGFUNC("e1000_init_hw_i210");
816 	if ((hw->mac.type >= e1000_i210) &&
817 	    !(e1000_get_flash_presence_i210(hw))) {
818 		ret_val = e1000_pll_workaround_i210(hw);
819 		if (ret_val != E1000_SUCCESS)
820 			return ret_val;
821 	}
822 	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
823 
824 	/* Initialize identification LED */
825 	mac->ops.id_led_init(hw);
826 
827 	ret_val = e1000_init_hw_base(hw);
828 	return ret_val;
829 }
830