/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_prototype.h"

enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data);
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					    u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
						 u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				       u32 offset, u16 words, void *data,
				       bool last_command);
/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (and in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * The FLASH is always accessed through the Shadow RAM.
 **/
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 fla, gens;
	u8 sr_size;

	DEBUGFUNC("i40e_init_nvm");

	/* The SR size is stored regardless of the NVM programming mode,
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Convert to words (the register field gives the SR size as 2^sr_size KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = FALSE;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = TRUE;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}
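
/*
 * Worked example of the size conversion above (illustrative only; the actual
 * field value depends on the part's flash, and I40E_SR_WORDS_IN_1KB is
 * assumed to be 512, i.e. one KB expressed in 16-bit words): if the
 * GLNVM_GENS SR_SIZE field reads 6, the Shadow RAM is 2^6 = 64 KB, so
 * nvm->sr_size becomes 64 * 512 = 32768 sixteen-bit words.
 */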

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading or writing
 * via the proper Admin Command.
 **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 gtime, timeout;
	u64 time_left = 0;

	DEBUGFUNC("i40e_acquire_nvm");

	if (hw->nvm.blank_nvm_mode)
		goto i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Read the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			i40e_msec_delay(10);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (ret_code == I40E_SUCCESS) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code != I40E_SUCCESS) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_acquire_nvm_exit:
	return ret_code;
}
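
/*
 * Illustrative usage sketch (not part of the driver): the expected calling
 * pattern around the read helpers below is acquire -> read -> release.
 * I40E_RESOURCE_READ is assumed to be the read access type from the shared
 * enum i40e_aq_resource_access_type; the helper name is hypothetical.
 */
#if 0
static enum i40e_status_code
example_read_one_word(struct i40e_hw *hw, u16 offset, u16 *word)
{
	enum i40e_status_code status;

	/* Take NVM ownership for reading */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status != I40E_SUCCESS)
		return status;

	/* Read a single Shadow RAM word at the given offset */
	status = i40e_read_nvm_word(hw, offset, word);

	/* Always hand the resource back, even if the read failed */
	i40e_release_nvm(hw);
	return status;
}
#endif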

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 total_delay = 0;

	DEBUGFUNC("i40e_release_nvm");

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		i40e_msec_delay(1);
		ret_code = i40e_aq_release_resource(hw,
					I40E_NVM_RESOURCE_ID, 0, NULL);
		total_delay++;
	}
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = I40E_SUCCESS;
			break;
		}
		i40e_usec_delay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word - Reads Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, using either the GLNVM_SRCTL
 * register or the AdminQ, depending on the MAC type.
 **/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
#ifdef X722_SUPPORT
	if (hw->mac.type == I40E_MAC_X722)
		return i40e_read_nvm_word_aq(hw, offset, data);
#endif
	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (ret_code == I40E_SUCCESS) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (ret_code == I40E_SUCCESS) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ NVM read command.
 **/
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;

	DEBUGFUNC("i40e_read_nvm_word_aq");

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
	*data = LE16_TO_CPU(*(__le16 *)data);

	return ret_code;
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using either the SRCTL
 * register or the AdminQ, depending on the MAC type. The buffer read is
 * expected to be preceded by the NVM ownership take and followed by the
 * release by the caller.
 **/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
#ifdef X722_SUPPORT
	if (hw->mac.type == I40E_MAC_X722)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
#endif
	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time using
 * i40e_read_nvm_word_srctl(). The buffer read is expected to be preceded by
 * the NVM ownership take and followed by the release by the caller.
 **/
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
						 u16 *words, u16 *data)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 index, word;

	DEBUGFUNC("i40e_read_nvm_buffer_srctl");

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code != I40E_SUCCESS)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is expected to be preceded by the NVM ownership
 * take and followed by the release by the caller.
 **/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = FALSE;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate the number of words to read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is the last command; if so, set the flag */
		if ((words_read + read_size) >= *words)
			last_cmd = TRUE;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment the counter of words already read and move the
		 * offset to the new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}
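
/*
 * Worked example of the split above (illustrative; it assumes
 * I40E_SR_SECTOR_SIZE_IN_WORDS is 2048, i.e. one 4 KB sector of 16-bit
 * words): reading *words = 20 starting at offset = 2040 lands mid-sector,
 * so the first AQ read fetches min(20, 2048 - 2040) = 8 words up to the
 * sector boundary, and the second read starts at offset 2048 and fetches
 * the remaining 12 words with last_cmd set.
 */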

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer that receives the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a buffer of 16 bit words from the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				       u32 offset, u16 words, void *data,
				       bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_read_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector) in one AQ write */
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership has to be acquired and released (on ARQ completion event
 * reception) by the caller. To commit the SR to NVM, the update checksum
 * function should be called.
 **/
enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
					  void *data)
{
	DEBUGFUNC("i40e_write_nvm_word");

	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

	/* Value 0x00 below means that we treat SR as a flat mem */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
}
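
/*
 * Illustrative usage sketch (not part of the driver): a Shadow RAM update
 * follows write -> checksum update -> release. I40E_RESOURCE_WRITE is
 * assumed to be the write access type from enum
 * i40e_aq_resource_access_type; the helper name and offset are hypothetical.
 */
#if 0
static enum i40e_status_code
example_update_one_word(struct i40e_hw *hw, u32 offset, u16 value)
{
	enum i40e_status_code status;

	/* Take NVM ownership for writing */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
	if (status != I40E_SUCCESS)
		return status;

	/* Write the word into the Shadow RAM */
	status = i40e_write_nvm_word(hw, offset, &value);

	/* Recompute and write the SW checksum so the SR can be committed */
	if (status == I40E_SUCCESS)
		status = i40e_update_nvm_checksum(hw);

	/* In the real driver the release is deferred until the ARQ
	 * completion event for the last write arrives; it is done
	 * immediately here only to keep the sketch short.
	 */
	i40e_release_nvm(hw);
	return status;
}
#endif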

/**
 * i40e_write_nvm_buffer - Writes Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller. To commit the SR to NVM,
 * the update checksum function should be called.
 **/
enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
					    u8 module_pointer, u32 offset,
					    u16 words, void *data)
{
	__le16 *le_word_ptr = (__le16 *)data;
	u16 *word_ptr = (u16 *)data;
	u32 i = 0;

	DEBUGFUNC("i40e_write_nvm_buffer");

	for (i = 0; i < words; i++)
		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
				 data, FALSE);
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD module is customer specific and unknown, so this function skips
 * its maximum possible size (1kB).
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
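
/*
 * Checksum relationship implied by the calculation above (illustrative;
 * I40E_SR_SW_CHECKSUM_BASE is assumed to be the 16-bit base constant from
 * the shared type header): the stored checksum word is chosen so that
 *
 *   checksum_word + sum(all other covered SR words) == BASE  (mod 2^16)
 *
 * which is why i40e_validate_nvm_checksum() below only has to recompute
 * BASE - sum(other words) and compare it with the word stored at
 * I40E_SR_SW_CHECKSUM_WORD.
 */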

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum;
	__le16 le_sum;

	DEBUGFUNC("i40e_update_nvm_checksum");

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	le_sum = CPU_TO_LE16(checksum);
	if (ret_code == I40E_SUCCESS)
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, TRUE);

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, the pointer can be NULL.
 **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (ret_code != I40E_SUCCESS)
		goto i40e_validate_nvm_checksum_exit;

	/* Do not use i40e_read_nvm_word() because we do not want to take
	 * the synchronization semaphores twice here.
	 */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify that the checksum read from the EEPROM is the same as the
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}
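
/*
 * Illustrative usage sketch (not part of the driver): validating the NVM
 * checksum, e.g. during device bring-up. The acquire/release pair is an
 * assumption about the surrounding locking; I40E_RESOURCE_READ comes from
 * enum i40e_aq_resource_access_type, and the helper name is hypothetical.
 */
#if 0
static enum i40e_status_code
example_check_nvm(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u16 checksum = 0;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status != I40E_SUCCESS)
		return status;

	/* Recompute the SW checksum and compare it with the stored one;
	 * the computed value is returned through &checksum.
	 */
	status = i40e_validate_nvm_checksum(hw, &checksum);

	i40e_release_nvm(hw);
	return status;
}
#endif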
713