xref: /freebsd/sys/dev/ixl/i40e_nvm.c (revision 1d767a8eae87497c53037a6e470505ce1061592c)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "i40e_prototype.h"
36 
37 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 					       u16 *data);
39 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 					    u16 *data);
41 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
42 						 u16 *words, u16 *data);
43 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
44 					      u16 *words, u16 *data);
45 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
46 				       u32 offset, u16 words, void *data,
47 				       bool last_command);
48 
49 /**
50  * i40e_init_nvm - Initialize the NVM info structure
51  * @hw: pointer to the HW structure
52  *
53  * Sets up the NVM info structure. Should be called once per NVM
54  * initialization, e.g. inside i40e_init_shared_code().
55  * Note that the term NVM is used here (and in all methods covered in this
56  * file) as an equivalent of the FLASH part mapped into the Shadow RAM (SR);
57  * the FLASH is always accessed through the Shadow RAM.
58  **/
59 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 {
61 	struct i40e_nvm_info *nvm = &hw->nvm;
62 	enum i40e_status_code ret_code = I40E_SUCCESS;
63 	u32 fla, gens;
64 	u8 sr_size;
65 
66 	DEBUGFUNC("i40e_init_nvm");
67 
68 	/* The SR size is stored regardless of the nvm programming mode
69 	 * as the blank mode may be used in the factory line.
70 	 */
71 	gens = rd32(hw, I40E_GLNVM_GENS);
72 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
73 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
74 	/* Switching to words (the SR size is 2^sr_size KB) */
75 	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
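	/*
	 * For example, with the line above a GENS SR_SIZE field of 5 gives
	 * BIT(5) = 32, i.e. a 32KB Shadow RAM, or 32 * 512 = 16384 16-bit
	 * words (assuming I40E_SR_WORDS_IN_1KB is 512).
	 */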
76 
77 	/* Check if we are in the normal or blank NVM programming mode */
78 	fla = rd32(hw, I40E_GLNVM_FLA);
79 	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80 		/* Max NVM timeout */
81 		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
82 		nvm->blank_nvm_mode = FALSE;
83 	} else { /* Blank programming mode */
84 		nvm->blank_nvm_mode = TRUE;
85 		ret_code = I40E_ERR_NVM_BLANK_MODE;
86 		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
87 	}
88 
89 	return ret_code;
90 }
91 
92 /**
93  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
94  * @hw: pointer to the HW structure
95  * @access: NVM access type (read or write)
96  *
97  * This function will request NVM ownership for reading or writing
98  * via the proper Admin Command.
99  **/
100 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
101 				       enum i40e_aq_resource_access_type access)
102 {
103 	enum i40e_status_code ret_code = I40E_SUCCESS;
104 	u64 gtime, timeout;
105 	u64 time_left = 0;
106 
107 	DEBUGFUNC("i40e_acquire_nvm");
108 
109 	if (hw->nvm.blank_nvm_mode)
110 		goto i40e_acquire_nvm_exit;
111 
112 	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
113 					    0, &time_left, NULL);
114 	/* Reading the Global Device Timer */
115 	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 
117 	/* Store the timeout */
118 	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
119 
120 	if (ret_code)
121 		i40e_debug(hw, I40E_DEBUG_NVM,
122 			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
123 			   access, time_left, ret_code, hw->aq.asq_last_status);
124 
125 	if (ret_code && time_left) {
126 		/* Poll until the current NVM owner times out */
127 		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
128 		while ((gtime < timeout) && time_left) {
129 			i40e_msec_delay(10);
130 			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
131 			ret_code = i40e_aq_request_resource(hw,
132 							I40E_NVM_RESOURCE_ID,
133 							access, 0, &time_left,
134 							NULL);
135 			if (ret_code == I40E_SUCCESS) {
136 				hw->nvm.hw_semaphore_timeout =
137 					    I40E_MS_TO_GTIME(time_left) + gtime;
138 				break;
139 			}
140 		}
141 		if (ret_code != I40E_SUCCESS) {
142 			hw->nvm.hw_semaphore_timeout = 0;
143 			i40e_debug(hw, I40E_DEBUG_NVM,
144 				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
145 				   time_left, ret_code, hw->aq.asq_last_status);
146 		}
147 	}
148 
149 i40e_acquire_nvm_exit:
150 	return ret_code;
151 }
152 
153 /**
154  * i40e_release_nvm - Generic request for releasing the NVM ownership
155  * @hw: pointer to the HW structure
156  *
157  * This function will release NVM resource via the proper Admin Command.
158  **/
159 void i40e_release_nvm(struct i40e_hw *hw)
160 {
161 	enum i40e_status_code ret_code = I40E_SUCCESS;
162 	u32 total_delay = 0;
163 
164 	DEBUGFUNC("i40e_release_nvm");
165 
166 	if (hw->nvm.blank_nvm_mode)
167 		return;
168 
169 	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170 
171 	/* In some rare cases, trying to release the resource results in an
172 	 * admin queue timeout; handle that correctly here
173 	 */
174 	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
175 	       (total_delay < hw->aq.asq_cmd_timeout)) {
176 			i40e_msec_delay(1);
177 			ret_code = i40e_aq_release_resource(hw,
178 						I40E_NVM_RESOURCE_ID, 0, NULL);
179 			total_delay++;
180 	}
181 }
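/*
 * A typical caller (outside this file) pairs the acquire/release above as
 * follows; this is an illustrative sketch only:
 *
 *	enum i40e_status_code ret;
 *	u16 word;
 *
 *	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *	if (ret == I40E_SUCCESS) {
 *		ret = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &word);
 *		i40e_release_nvm(hw);
 *	}
 */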
182 
183 /**
184  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
185  * @hw: pointer to the HW structure
186  *
187  * Polls the SRCTL Shadow RAM register done bit.
188  **/
189 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190 {
191 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
192 	u32 srctl, wait_cnt;
193 
194 	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195 
196 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
197 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
198 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
199 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
200 			ret_code = I40E_SUCCESS;
201 			break;
202 		}
203 		i40e_usec_delay(5);
204 	}
205 	if (ret_code == I40E_ERR_TIMEOUT)
206 		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
207 	return ret_code;
208 }
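/*
 * The polling loop above waits at most I40E_SRRD_SRCTL_ATTEMPTS polls of
 * 5 us each (plus register read time); for example, 100000 attempts would
 * correspond to roughly half a second of worst-case delay.
 */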
209 
210 /**
211  * i40e_read_nvm_word - Reads NVM word, acquiring the lock if necessary
212  * @hw: pointer to the HW structure
213  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
214  * @data: word read from the Shadow RAM
215  *
216  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
217  **/
218 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
219 					 u16 *data)
220 {
221 	enum i40e_status_code ret_code = I40E_SUCCESS;
222 
223 	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
224 	return ret_code;
225 }
226 
227 /**
228  * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
229  * @hw: pointer to the HW structure
230  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
231  * @data: word read from the Shadow RAM
232  *
233  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
234  **/
235 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
236 					   u16 offset,
237 					   u16 *data)
238 {
239 	enum i40e_status_code ret_code = I40E_SUCCESS;
240 
241 	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
242 	return ret_code;
243 }
244 
245 /**
246  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
247  * @hw: pointer to the HW structure
248  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
249  * @data: word read from the Shadow RAM
250  *
251  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
252  **/
253 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
254 					       u16 *data)
255 {
256 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
257 	u32 sr_reg;
258 
259 	DEBUGFUNC("i40e_read_nvm_word_srctl");
260 
261 	if (offset >= hw->nvm.sr_size) {
262 		i40e_debug(hw, I40E_DEBUG_NVM,
263 			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
264 			   offset, hw->nvm.sr_size);
265 		ret_code = I40E_ERR_PARAM;
266 		goto read_nvm_exit;
267 	}
268 
269 	/* Poll the done bit first */
270 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
271 	if (ret_code == I40E_SUCCESS) {
272 		/* Write the address and start reading */
273 		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
274 			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
275 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
276 
277 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
278 		ret_code = i40e_poll_sr_srctl_done_bit(hw);
279 		if (ret_code == I40E_SUCCESS) {
280 			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
281 			*data = (u16)((sr_reg &
282 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
283 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
284 		}
285 	}
286 	if (ret_code != I40E_SUCCESS)
287 		i40e_debug(hw, I40E_DEBUG_NVM,
288 			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
289 			   offset);
290 
291 read_nvm_exit:
292 	return ret_code;
293 }
294 
295 /**
296  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
297  * @hw: pointer to the HW structure
298  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
299  * @data: word read from the Shadow RAM
300  *
301  * Reads one 16 bit word from the Shadow RAM using an AdminQ command.
302  **/
303 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
304 					    u16 *data)
305 {
306 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
307 
308 	DEBUGFUNC("i40e_read_nvm_word_aq");
309 
310 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
311 	*data = LE16_TO_CPU(*(__le16 *)data);
312 
313 	return ret_code;
314 }
315 
316 /**
317  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
318  * @hw: pointer to the HW structure
319  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
320  * @words: (in) number of words to read; (out) number of words actually read
321  * @data: words read from the Shadow RAM
322  *
323  * Reads 16 bit words (data buffer) from the SR using the
324  * i40e_read_nvm_buffer_srctl() method. NVM ownership must be taken and
325  * released by the caller around this call.
326  **/
327 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
328 					     u16 offset,
329 					     u16 *words, u16 *data)
330 {
331 	enum i40e_status_code ret_code = I40E_SUCCESS;
332 
333 	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
334 	return ret_code;
335 }
336 
337 /**
338  * i40e_read_nvm_buffer - Reads Shadow RAM buffer, acquiring the lock if necessary
339  * @hw: pointer to the HW structure
340  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
341  * @words: (in) number of words to read; (out) number of words actually read
342  * @data: words read from the Shadow RAM
343  *
344  * Reads 16 bit words (data buffer) from the SR using the
345  * i40e_read_nvm_buffer_srctl() method, taking and releasing the NVM
346  * ownership if necessary.
347  **/
348 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
349 					   u16 *words, u16 *data)
350 {
351 	enum i40e_status_code ret_code = I40E_SUCCESS;
352 
353 	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
354 	return ret_code;
355 }
356 
357 /**
358  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
359  * @hw: pointer to the HW structure
360  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
361  * @words: (in) number of words to read; (out) number of words actually read
362  * @data: words read from the Shadow RAM
363  *
364  * Reads 16 bit words (data buffer) from the SR, one word at a time, using
365  * the i40e_read_nvm_word_srctl() method. The caller is responsible for any
366  * required NVM ownership.
367  **/
368 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
369 						 u16 *words, u16 *data)
370 {
371 	enum i40e_status_code ret_code = I40E_SUCCESS;
372 	u16 index, word;
373 
374 	DEBUGFUNC("i40e_read_nvm_buffer_srctl");
375 
376 	/* Loop through the selected region */
377 	for (word = 0; word < *words; word++) {
378 		index = offset + word;
379 		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
380 		if (ret_code != I40E_SUCCESS)
381 			break;
382 	}
383 
384 	/* Update the number of words read from the Shadow RAM */
385 	*words = word;
386 
387 	return ret_code;
388 }
389 
390 /**
391  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
392  * @hw: pointer to the HW structure
393  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
394  * @words: (in) number of words to read; (out) number of words actually read
395  * @data: words read from the Shadow RAM
396  *
397  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
398  * method, splitting the request on Shadow RAM sector boundaries. The caller
399  * is responsible for any required NVM ownership.
400  **/
401 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
402 					      u16 *words, u16 *data)
403 {
404 	enum i40e_status_code ret_code;
405 	u16 read_size = *words;
406 	bool last_cmd = FALSE;
407 	u16 words_read = 0;
408 	u16 i = 0;
409 
410 	DEBUGFUNC("i40e_read_nvm_buffer_aq");
411 
412 	do {
413 		/* Calculate the number of words to read in this step.
414 		 * The FVL AQ does not allow reading more than one page at a
415 		 * time or crossing page boundaries.
416 		 */
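		/*
		 * For example (illustrative values, assuming a sector size of
		 * 0x800 words, i.e. 4KB): offset = 0x7F0 and *words = 0x40
		 * are split into a first read of 0x10 words (up to the sector
		 * boundary) followed by a second read of 0x30 words.
		 */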
417 		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
418 			read_size = min(*words,
419 					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
420 				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
421 		else
422 			read_size = min((*words - words_read),
423 					I40E_SR_SECTOR_SIZE_IN_WORDS);
424 
425 		/* If this is the last command, set the proper flag */
426 		if ((words_read + read_size) >= *words)
427 			last_cmd = TRUE;
428 
429 		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
430 					    data + words_read, last_cmd);
431 		if (ret_code != I40E_SUCCESS)
432 			goto read_nvm_buffer_aq_exit;
433 
434 		/* Increment counter for words already read and move offset to
435 		 * new read location
436 		 */
437 		words_read += read_size;
438 		offset += read_size;
439 	} while (words_read < *words);
440 
441 	for (i = 0; i < *words; i++)
442 		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
443 
444 read_nvm_buffer_aq_exit:
445 	*words = words_read;
446 	return ret_code;
447 }
448 
449 /**
450  * i40e_read_nvm_aq - Read Shadow RAM.
451  * @hw: pointer to the HW structure.
452  * @module_pointer: module pointer location in words from the NVM beginning
453  * @offset: offset in words from module start
454  * @words: number of words to read
455  * @data: buffer for the words read from the Shadow RAM
456  * @last_command: tells the AdminQ that this is the last command
457  *
458  * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
459  **/
460 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
461 				       u32 offset, u16 words, void *data,
462 				       bool last_command)
463 {
464 	enum i40e_status_code ret_code = I40E_ERR_NVM;
465 	struct i40e_asq_cmd_details cmd_details;
466 
467 	DEBUGFUNC("i40e_read_nvm_aq");
468 
469 	memset(&cmd_details, 0, sizeof(cmd_details));
470 	cmd_details.wb_desc = &hw->nvm_wb_desc;
471 
472 	/* Here we are checking the SR limit only for the flat memory model.
473 	 * We cannot do it for the module-based model, as we did not acquire
474 	 * the NVM resource yet (we cannot get the module pointer value).
475 	 * Firmware will check the module-based model.
476 	 */
477 	if ((offset + words) > hw->nvm.sr_size)
478 		i40e_debug(hw, I40E_DEBUG_NVM,
479 			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
480 			   (offset + words), hw->nvm.sr_size);
481 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
482 		/* We can read only up to 4KB (one sector) in one AQ read */
483 		i40e_debug(hw, I40E_DEBUG_NVM,
484 			   "NVM read fail error: tried to read %d words, limit is %d.\n",
485 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
486 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
487 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
488 		/* A single read cannot spread over two sectors */
489 		i40e_debug(hw, I40E_DEBUG_NVM,
490 			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
491 			   offset, words);
492 	else
493 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
494 					    2 * offset,  /*bytes*/
495 					    2 * words,   /*bytes*/
496 					    data, last_command, &cmd_details);
497 
498 	return ret_code;
499 }
500 
501 /**
502  * i40e_write_nvm_aq - Writes Shadow RAM.
503  * @hw: pointer to the HW structure.
504  * @module_pointer: module pointer location in words from the NVM beginning
505  * @offset: offset in words from module start
506  * @words: number of words to write
507  * @data: buffer with words to write to the Shadow RAM
508  * @last_command: tells the AdminQ that this is the last command
509  *
510  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
511  **/
512 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
513 					u32 offset, u16 words, void *data,
514 					bool last_command)
515 {
516 	enum i40e_status_code ret_code = I40E_ERR_NVM;
517 	struct i40e_asq_cmd_details cmd_details;
518 
519 	DEBUGFUNC("i40e_write_nvm_aq");
520 
521 	memset(&cmd_details, 0, sizeof(cmd_details));
522 	cmd_details.wb_desc = &hw->nvm_wb_desc;
523 
524 	/* Here we are checking the SR limit only for the flat memory model.
525 	 * We cannot do it for the module-based model, as we did not acquire
526 	 * the NVM resource yet (we cannot get the module pointer value).
527 	 * Firmware will check the module-based model.
528 	 */
529 	if ((offset + words) > hw->nvm.sr_size)
530 		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
531 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
532 		/* We can write only up to 4KB (one sector), in one AQ write */
533 		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
534 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
535 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
536 		/* A single write cannot spread over two sectors */
537 		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
538 	else
539 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
540 					      2 * offset,  /*bytes*/
541 					      2 * words,   /*bytes*/
542 					      data, last_command, &cmd_details);
543 
544 	return ret_code;
545 }
546 
547 /**
548  * __i40e_write_nvm_word - Writes Shadow RAM word
549  * @hw: pointer to the HW structure
550  * @offset: offset of the Shadow RAM word to write
551  * @data: word to write to the Shadow RAM
552  *
553  * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
554  * NVM ownership has to be acquired and released (on ARQ completion event
555  * reception) by the caller. To commit the SR to NVM, the update checksum
556  * function should be called.
557  **/
558 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
559 					    void *data)
560 {
561 	DEBUGFUNC("i40e_write_nvm_word");
562 
563 	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
564 
565 	/* Value 0x00 below means that we treat SR as a flat mem */
566 	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
567 }
568 
569 /**
570  * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
571  * @hw: pointer to the HW structure
572  * @module_pointer: module pointer location in words from the NVM beginning
573  * @offset: offset of the Shadow RAM buffer to write
574  * @words: number of words to write
575  * @data: words to write to the Shadow RAM
576  *
577  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
578  * NVM ownership must be acquired before calling this function and released
579  * on ARQ completion event reception by the caller. To commit the SR to NVM,
580  * the update checksum function should be called.
581  **/
582 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
583 					      u8 module_pointer, u32 offset,
584 					      u16 words, void *data)
585 {
586 	__le16 *le_word_ptr = (__le16 *)data;
587 	u16 *word_ptr = (u16 *)data;
588 	u32 i = 0;
589 
590 	DEBUGFUNC("i40e_write_nvm_buffer");
591 
592 	for (i = 0; i < words; i++)
593 		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
594 
595 	/* Here we will only write one buffer as the size of the modules
596 	 * mirrored in the Shadow RAM is always less than 4K.
597 	 */
598 	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
599 				 data, FALSE);
600 }
601 
602 /**
603  * i40e_calc_nvm_checksum - Calculates and returns the checksum
604  * @hw: pointer to hardware structure
605  * @checksum: pointer to the checksum
606  *
607  * This function calculates SW Checksum that covers the whole 64kB shadow RAM
608  * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
609  * is customer specific and unknown. Therefore, this function skips the
610  * maximum possible size of the VPD module (1kB).
611  **/
612 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
613 {
614 	enum i40e_status_code ret_code = I40E_SUCCESS;
615 	struct i40e_virt_mem vmem;
616 	u16 pcie_alt_module = 0;
617 	u16 checksum_local = 0;
618 	u16 vpd_module = 0;
619 	u16 *data;
620 	u16 i = 0;
621 
622 	DEBUGFUNC("i40e_calc_nvm_checksum");
623 
624 	ret_code = i40e_allocate_virt_mem(hw, &vmem,
625 				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
626 	if (ret_code)
627 		goto i40e_calc_nvm_checksum_exit;
628 	data = (u16 *)vmem.va;
629 
630 	/* read pointer to VPD area */
631 	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
632 					&vpd_module);
633 	if (ret_code != I40E_SUCCESS) {
634 		ret_code = I40E_ERR_NVM_CHECKSUM;
635 		goto i40e_calc_nvm_checksum_exit;
636 	}
637 
638 	/* read pointer to PCIe Alt Auto-load module */
639 	ret_code = __i40e_read_nvm_word(hw,
640 					I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
641 					&pcie_alt_module);
642 	if (ret_code != I40E_SUCCESS) {
643 		ret_code = I40E_ERR_NVM_CHECKSUM;
644 		goto i40e_calc_nvm_checksum_exit;
645 	}
646 
647 	/* Calculate SW checksum that covers the whole 64kB shadow RAM
648 	 * except the VPD and PCIe ALT Auto-load modules
649 	 */
650 	for (i = 0; i < hw->nvm.sr_size; i++) {
651 		/* Read SR page */
652 		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
653 			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
654 
655 			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
656 			if (ret_code != I40E_SUCCESS) {
657 				ret_code = I40E_ERR_NVM_CHECKSUM;
658 				goto i40e_calc_nvm_checksum_exit;
659 			}
660 		}
661 
662 		/* Skip Checksum word */
663 		if (i == I40E_SR_SW_CHECKSUM_WORD)
664 			continue;
665 		/* Skip VPD module (convert byte size to word count) */
666 		if ((i >= (u32)vpd_module) &&
667 		    (i < ((u32)vpd_module +
668 		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
669 			continue;
670 		}
671 		/* Skip PCIe ALT module (convert byte size to word count) */
672 		if ((i >= (u32)pcie_alt_module) &&
673 		    (i < ((u32)pcie_alt_module +
674 		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
675 			continue;
676 		}
677 
678 		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
679 	}
680 
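	/*
	 * The value stored in the checksum word is chosen so that the stored
	 * checksum plus the 16-bit sum of all covered words equals
	 * I40E_SR_SW_CHECKSUM_BASE (modulo 2^16).
	 */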
681 	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
682 
683 i40e_calc_nvm_checksum_exit:
684 	i40e_free_virt_mem(hw, &vmem);
685 	return ret_code;
686 }
687 
688 /**
689  * i40e_update_nvm_checksum - Updates the NVM checksum
690  * @hw: pointer to hardware structure
691  *
692  * NVM ownership must be acquired before calling this function and released
693  * on ARQ completion event reception by caller.
694  * This function will commit SR to NVM.
695  **/
696 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
697 {
698 	enum i40e_status_code ret_code = I40E_SUCCESS;
699 	u16 checksum;
700 	__le16 le_sum;
701 
702 	DEBUGFUNC("i40e_update_nvm_checksum");
703 
704 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
705 	le_sum = CPU_TO_LE16(checksum);
706 	if (ret_code == I40E_SUCCESS)
707 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
708 					     1, &le_sum, TRUE);
709 
710 	return ret_code;
711 }
712 
713 /**
714  * i40e_validate_nvm_checksum - Validate EEPROM checksum
715  * @hw: pointer to hardware structure
716  * @checksum: calculated checksum
717  *
718  * Performs checksum calculation and validates the NVM SW checksum. If the
719  * caller does not need the checksum, the pointer can be NULL.
720  **/
721 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
722 						 u16 *checksum)
723 {
724 	enum i40e_status_code ret_code = I40E_SUCCESS;
725 	u16 checksum_sr = 0;
726 	u16 checksum_local = 0;
727 
728 	DEBUGFUNC("i40e_validate_nvm_checksum");
729 
730 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
731 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
732 	if (!ret_code) {
733 		ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
734 		if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
735 			i40e_release_nvm(hw);
736 		if (ret_code != I40E_SUCCESS)
737 			goto i40e_validate_nvm_checksum_exit;
738 	} else {
739 		goto i40e_validate_nvm_checksum_exit;
740 	}
741 
742 	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
743 
744 	/* Verify read checksum from EEPROM is the same as
745 	 * calculated checksum
746 	 */
747 	if (checksum_local != checksum_sr)
748 		ret_code = I40E_ERR_NVM_CHECKSUM;
749 
750 	/* If the user cares, return the calculated checksum */
751 	if (checksum)
752 		*checksum = checksum_local;
753 
754 i40e_validate_nvm_checksum_exit:
755 	return ret_code;
756 }
757 
758 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
759 						    struct i40e_nvm_access *cmd,
760 						    u8 *bytes, int *perrno);
761 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
762 						    struct i40e_nvm_access *cmd,
763 						    u8 *bytes, int *perrno);
764 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
765 						    struct i40e_nvm_access *cmd,
766 						    u8 *bytes, int *perrno);
767 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
768 						    struct i40e_nvm_access *cmd,
769 						    int *perrno);
770 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
771 						   struct i40e_nvm_access *cmd,
772 						   int *perrno);
773 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
774 						   struct i40e_nvm_access *cmd,
775 						   u8 *bytes, int *perrno);
776 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
777 						  struct i40e_nvm_access *cmd,
778 						  u8 *bytes, int *perrno);
779 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
780 						 struct i40e_nvm_access *cmd,
781 						 u8 *bytes, int *perrno);
782 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
783 						    struct i40e_nvm_access *cmd,
784 						    u8 *bytes, int *perrno);
785 static INLINE u8 i40e_nvmupd_get_module(u32 val)
786 {
787 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
788 }
789 static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
790 {
791 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
792 }
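/*
 * The nvmupd "config" word packs a module pointer and a transaction type.
 * As a hypothetical example, assuming the usual definitions
 * (I40E_NVM_MOD_PNT_MASK == 0xFF and I40E_NVM_TRANS_SHIFT == 8), a config
 * of 0x050E decodes to transaction 0x5 and module 0x0E.
 */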
793 
794 static const char *i40e_nvm_update_state_str[] = {
795 	"I40E_NVMUPD_INVALID",
796 	"I40E_NVMUPD_READ_CON",
797 	"I40E_NVMUPD_READ_SNT",
798 	"I40E_NVMUPD_READ_LCB",
799 	"I40E_NVMUPD_READ_SA",
800 	"I40E_NVMUPD_WRITE_ERA",
801 	"I40E_NVMUPD_WRITE_CON",
802 	"I40E_NVMUPD_WRITE_SNT",
803 	"I40E_NVMUPD_WRITE_LCB",
804 	"I40E_NVMUPD_WRITE_SA",
805 	"I40E_NVMUPD_CSUM_CON",
806 	"I40E_NVMUPD_CSUM_SA",
807 	"I40E_NVMUPD_CSUM_LCB",
808 	"I40E_NVMUPD_STATUS",
809 	"I40E_NVMUPD_EXEC_AQ",
810 	"I40E_NVMUPD_GET_AQ_RESULT",
811 };
812 
813 /**
814  * i40e_nvmupd_command - Process an NVM update command
815  * @hw: pointer to hardware structure
816  * @cmd: pointer to nvm update command
817  * @bytes: pointer to the data buffer
818  * @perrno: pointer to return error code
819  *
820  * Dispatches command depending on what update state is current
821  **/
822 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
823 					  struct i40e_nvm_access *cmd,
824 					  u8 *bytes, int *perrno)
825 {
826 	enum i40e_status_code status;
827 	enum i40e_nvmupd_cmd upd_cmd;
828 
829 	DEBUGFUNC("i40e_nvmupd_command");
830 
831 	/* assume success */
832 	*perrno = 0;
833 
834 	/* early check for status command and debug msgs */
835 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
836 
837 	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
838 		   i40e_nvm_update_state_str[upd_cmd],
839 		   hw->nvmupd_state,
840 		   hw->aq.nvm_release_on_done,
841 		   cmd->command, cmd->config, cmd->offset, cmd->data_size);
842 
843 	if (upd_cmd == I40E_NVMUPD_INVALID) {
844 		*perrno = -EFAULT;
845 		i40e_debug(hw, I40E_DEBUG_NVM,
846 			   "i40e_nvmupd_validate_command returns %d errno %d\n",
847 			   upd_cmd, *perrno);
848 	}
849 
850 	/* a status request returns immediately rather than
851 	 * going into the state machine
852 	 */
853 	if (upd_cmd == I40E_NVMUPD_STATUS) {
854 		bytes[0] = hw->nvmupd_state;
855 		return I40E_SUCCESS;
856 	}
857 
858 	switch (hw->nvmupd_state) {
859 	case I40E_NVMUPD_STATE_INIT:
860 		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
861 		break;
862 
863 	case I40E_NVMUPD_STATE_READING:
864 		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
865 		break;
866 
867 	case I40E_NVMUPD_STATE_WRITING:
868 		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
869 		break;
870 
871 	case I40E_NVMUPD_STATE_INIT_WAIT:
872 	case I40E_NVMUPD_STATE_WRITE_WAIT:
873 		status = I40E_ERR_NOT_READY;
874 		*perrno = -EBUSY;
875 		break;
876 
877 	default:
878 		/* invalid state, should never happen */
879 		i40e_debug(hw, I40E_DEBUG_NVM,
880 			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
881 		status = I40E_NOT_SUPPORTED;
882 		*perrno = -ESRCH;
883 		break;
884 	}
885 	return status;
886 }
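/*
 * Summary of the state transitions driven by the handlers below (the
 * *_WAIT states are not cleared in this file; presumably that happens in
 * the AdminQ completion handling):
 *
 *	INIT    -- READ_SNT ok                     --> READING
 *	INIT    -- WRITE_SNT ok                    --> WRITE_WAIT
 *	INIT    -- WRITE_ERA/WRITE_SA/CSUM_SA ok   --> INIT_WAIT
 *	READING -- READ_LCB                        --> INIT
 *	WRITING -- WRITE_CON/CSUM_CON ok           --> WRITE_WAIT
 *	WRITING -- WRITE_LCB/CSUM_LCB ok           --> INIT_WAIT
 *	WRITING -- WRITE_LCB/CSUM_CON/CSUM_LCB err --> INIT
 */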
887 
888 /**
889  * i40e_nvmupd_state_init - Handle NVM update state Init
890  * @hw: pointer to hardware structure
891  * @cmd: pointer to nvm update command buffer
892  * @bytes: pointer to the data buffer
893  * @perrno: pointer to return error code
894  *
895  * Process legitimate commands of the Init state and conditionally set next
896  * state. Reject all other commands.
897  **/
898 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
899 						    struct i40e_nvm_access *cmd,
900 						    u8 *bytes, int *perrno)
901 {
902 	enum i40e_status_code status = I40E_SUCCESS;
903 	enum i40e_nvmupd_cmd upd_cmd;
904 
905 	DEBUGFUNC("i40e_nvmupd_state_init");
906 
907 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
908 
909 	switch (upd_cmd) {
910 	case I40E_NVMUPD_READ_SA:
911 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
912 		if (status) {
913 			*perrno = i40e_aq_rc_to_posix(status,
914 						     hw->aq.asq_last_status);
915 		} else {
916 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
917 			i40e_release_nvm(hw);
918 		}
919 		break;
920 
921 	case I40E_NVMUPD_READ_SNT:
922 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
923 		if (status) {
924 			*perrno = i40e_aq_rc_to_posix(status,
925 						     hw->aq.asq_last_status);
926 		} else {
927 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
928 			if (status)
929 				i40e_release_nvm(hw);
930 			else
931 				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
932 		}
933 		break;
934 
935 	case I40E_NVMUPD_WRITE_ERA:
936 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
937 		if (status) {
938 			*perrno = i40e_aq_rc_to_posix(status,
939 						     hw->aq.asq_last_status);
940 		} else {
941 			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
942 			if (status) {
943 				i40e_release_nvm(hw);
944 			} else {
945 				hw->aq.nvm_release_on_done = TRUE;
946 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
947 			}
948 		}
949 		break;
950 
951 	case I40E_NVMUPD_WRITE_SA:
952 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
953 		if (status) {
954 			*perrno = i40e_aq_rc_to_posix(status,
955 						     hw->aq.asq_last_status);
956 		} else {
957 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
958 			if (status) {
959 				i40e_release_nvm(hw);
960 			} else {
961 				hw->aq.nvm_release_on_done = TRUE;
962 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
963 			}
964 		}
965 		break;
966 
967 	case I40E_NVMUPD_WRITE_SNT:
968 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
969 		if (status) {
970 			*perrno = i40e_aq_rc_to_posix(status,
971 						     hw->aq.asq_last_status);
972 		} else {
973 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
974 			if (status)
975 				i40e_release_nvm(hw);
976 			else
977 				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
978 		}
979 		break;
980 
981 	case I40E_NVMUPD_CSUM_SA:
982 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
983 		if (status) {
984 			*perrno = i40e_aq_rc_to_posix(status,
985 						     hw->aq.asq_last_status);
986 		} else {
987 			status = i40e_update_nvm_checksum(hw);
988 			if (status) {
989 				*perrno = hw->aq.asq_last_status ?
990 				   i40e_aq_rc_to_posix(status,
991 						       hw->aq.asq_last_status) :
992 				   -EIO;
993 				i40e_release_nvm(hw);
994 			} else {
995 				hw->aq.nvm_release_on_done = TRUE;
996 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
997 			}
998 		}
999 		break;
1000 
1001 	case I40E_NVMUPD_EXEC_AQ:
1002 		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1003 		break;
1004 
1005 	case I40E_NVMUPD_GET_AQ_RESULT:
1006 		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1007 		break;
1008 
1009 	default:
1010 		i40e_debug(hw, I40E_DEBUG_NVM,
1011 			   "NVMUPD: bad cmd %s in init state\n",
1012 			   i40e_nvm_update_state_str[upd_cmd]);
1013 		status = I40E_ERR_NVM;
1014 		*perrno = -ESRCH;
1015 		break;
1016 	}
1017 	return status;
1018 }
1019 
1020 /**
1021  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1022  * @hw: pointer to hardware structure
1023  * @cmd: pointer to nvm update command buffer
1024  * @bytes: pointer to the data buffer
1025  * @perrno: pointer to return error code
1026  *
1027  * NVM ownership is already held.  Process legitimate commands and set any
1028  * change in state; reject all other commands.
1029  **/
1030 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1031 						    struct i40e_nvm_access *cmd,
1032 						    u8 *bytes, int *perrno)
1033 {
1034 	enum i40e_status_code status = I40E_SUCCESS;
1035 	enum i40e_nvmupd_cmd upd_cmd;
1036 
1037 	DEBUGFUNC("i40e_nvmupd_state_reading");
1038 
1039 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1040 
1041 	switch (upd_cmd) {
1042 	case I40E_NVMUPD_READ_SA:
1043 	case I40E_NVMUPD_READ_CON:
1044 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1045 		break;
1046 
1047 	case I40E_NVMUPD_READ_LCB:
1048 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1049 		i40e_release_nvm(hw);
1050 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1051 		break;
1052 
1053 	default:
1054 		i40e_debug(hw, I40E_DEBUG_NVM,
1055 			   "NVMUPD: bad cmd %s in reading state.\n",
1056 			   i40e_nvm_update_state_str[upd_cmd]);
1057 		status = I40E_NOT_SUPPORTED;
1058 		*perrno = -ESRCH;
1059 		break;
1060 	}
1061 	return status;
1062 }
1063 
1064 /**
1065  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1066  * @hw: pointer to hardware structure
1067  * @cmd: pointer to nvm update command buffer
1068  * @bytes: pointer to the data buffer
1069  * @perrno: pointer to return error code
1070  *
1071  * NVM ownership is already held.  Process legitimate commands and set any
1072  * change in state; reject all other commands
1073  **/
1074 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1075 						    struct i40e_nvm_access *cmd,
1076 						    u8 *bytes, int *perrno)
1077 {
1078 	enum i40e_status_code status = I40E_SUCCESS;
1079 	enum i40e_nvmupd_cmd upd_cmd;
1080 	bool retry_attempt = FALSE;
1081 
1082 	DEBUGFUNC("i40e_nvmupd_state_writing");
1083 
1084 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1085 
1086 retry:
1087 	switch (upd_cmd) {
1088 	case I40E_NVMUPD_WRITE_CON:
1089 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1090 		if (!status)
1091 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1092 		break;
1093 
1094 	case I40E_NVMUPD_WRITE_LCB:
1095 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1096 		if (status) {
1097 			*perrno = hw->aq.asq_last_status ?
1098 				   i40e_aq_rc_to_posix(status,
1099 						       hw->aq.asq_last_status) :
1100 				   -EIO;
1101 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1102 		} else {
1103 			hw->aq.nvm_release_on_done = TRUE;
1104 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1105 		}
1106 		break;
1107 
1108 	case I40E_NVMUPD_CSUM_CON:
1109 		/* Assumes the caller has acquired the nvm */
1110 		status = i40e_update_nvm_checksum(hw);
1111 		if (status) {
1112 			*perrno = hw->aq.asq_last_status ?
1113 				   i40e_aq_rc_to_posix(status,
1114 						       hw->aq.asq_last_status) :
1115 				   -EIO;
1116 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1117 		} else {
1118 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1119 		}
1120 		break;
1121 
1122 	case I40E_NVMUPD_CSUM_LCB:
1123 		/* Assumes the caller has acquired the nvm */
1124 		status = i40e_update_nvm_checksum(hw);
1125 		if (status) {
1126 			*perrno = hw->aq.asq_last_status ?
1127 				   i40e_aq_rc_to_posix(status,
1128 						       hw->aq.asq_last_status) :
1129 				   -EIO;
1130 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1131 		} else {
1132 			hw->aq.nvm_release_on_done = TRUE;
1133 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1134 		}
1135 		break;
1136 
1137 	default:
1138 		i40e_debug(hw, I40E_DEBUG_NVM,
1139 			   "NVMUPD: bad cmd %s in writing state.\n",
1140 			   i40e_nvm_update_state_str[upd_cmd]);
1141 		status = I40E_NOT_SUPPORTED;
1142 		*perrno = -ESRCH;
1143 		break;
1144 	}
1145 
1146 	/* In some circumstances, a multi-write transaction takes longer
1147 	 * than the default 3 minute timeout on the write semaphore.  If
1148 	 * the write failed with an EBUSY status, this is likely the problem,
1149 	 * so here we try to reacquire the semaphore then retry the write.
1150 	 * We only do one retry, then give up.
1151 	 */
1152 	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1153 	    !retry_attempt) {
1154 		enum i40e_status_code old_status = status;
1155 		u32 old_asq_status = hw->aq.asq_last_status;
1156 		u32 gtime;
1157 
1158 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1159 		if (gtime >= hw->nvm.hw_semaphore_timeout) {
1160 			i40e_debug(hw, I40E_DEBUG_ALL,
1161 				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1162 				   gtime, hw->nvm.hw_semaphore_timeout);
1163 			i40e_release_nvm(hw);
1164 			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1165 			if (status) {
1166 				i40e_debug(hw, I40E_DEBUG_ALL,
1167 					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1168 					   hw->aq.asq_last_status);
1169 				status = old_status;
1170 				hw->aq.asq_last_status = old_asq_status;
1171 			} else {
1172 				retry_attempt = TRUE;
1173 				goto retry;
1174 			}
1175 		}
1176 	}
1177 
1178 	return status;
1179 }
1180 
1181 /**
1182  * i40e_nvmupd_validate_command - Validate given command
1183  * @hw: pointer to hardware structure
1184  * @cmd: pointer to nvm update command buffer
1185  * @perrno: pointer to return error code
1186  *
1187  * Return one of the valid command types or I40E_NVMUPD_INVALID
1188  **/
1189 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1190 						    struct i40e_nvm_access *cmd,
1191 						    int *perrno)
1192 {
1193 	enum i40e_nvmupd_cmd upd_cmd;
1194 	u8 module, transaction;
1195 
1196 	DEBUGFUNC("i40e_nvmupd_validate_command");
1197 
1198 	/* anything that doesn't match a recognized case is an error */
1199 	upd_cmd = I40E_NVMUPD_INVALID;
1200 
1201 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1202 	module = i40e_nvmupd_get_module(cmd->config);
1203 
1204 	/* limits on data size */
1205 	if ((cmd->data_size < 1) ||
1206 	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1207 		i40e_debug(hw, I40E_DEBUG_NVM,
1208 			   "i40e_nvmupd_validate_command data_size %d\n",
1209 			   cmd->data_size);
1210 		*perrno = -EFAULT;
1211 		return I40E_NVMUPD_INVALID;
1212 	}
1213 
1214 	switch (cmd->command) {
1215 	case I40E_NVM_READ:
1216 		switch (transaction) {
1217 		case I40E_NVM_CON:
1218 			upd_cmd = I40E_NVMUPD_READ_CON;
1219 			break;
1220 		case I40E_NVM_SNT:
1221 			upd_cmd = I40E_NVMUPD_READ_SNT;
1222 			break;
1223 		case I40E_NVM_LCB:
1224 			upd_cmd = I40E_NVMUPD_READ_LCB;
1225 			break;
1226 		case I40E_NVM_SA:
1227 			upd_cmd = I40E_NVMUPD_READ_SA;
1228 			break;
1229 		case I40E_NVM_EXEC:
1230 			if (module == 0xf)
1231 				upd_cmd = I40E_NVMUPD_STATUS;
1232 			else if (module == 0)
1233 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1234 			break;
1235 		}
1236 		break;
1237 
1238 	case I40E_NVM_WRITE:
1239 		switch (transaction) {
1240 		case I40E_NVM_CON:
1241 			upd_cmd = I40E_NVMUPD_WRITE_CON;
1242 			break;
1243 		case I40E_NVM_SNT:
1244 			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1245 			break;
1246 		case I40E_NVM_LCB:
1247 			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1248 			break;
1249 		case I40E_NVM_SA:
1250 			upd_cmd = I40E_NVMUPD_WRITE_SA;
1251 			break;
1252 		case I40E_NVM_ERA:
1253 			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1254 			break;
1255 		case I40E_NVM_CSUM:
1256 			upd_cmd = I40E_NVMUPD_CSUM_CON;
1257 			break;
1258 		case (I40E_NVM_CSUM|I40E_NVM_SA):
1259 			upd_cmd = I40E_NVMUPD_CSUM_SA;
1260 			break;
1261 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1262 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1263 			break;
1264 		case I40E_NVM_EXEC:
1265 			if (module == 0)
1266 				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1267 			break;
1268 		}
1269 		break;
1270 	}
1271 
1272 	return upd_cmd;
1273 }
1274 
1275 /**
1276  * i40e_nvmupd_exec_aq - Run an AQ command
1277  * @hw: pointer to hardware structure
1278  * @cmd: pointer to nvm update command buffer
1279  * @bytes: pointer to the data buffer
1280  * @perrno: pointer to return error code
1281  *
1282  * cmd structure contains identifiers and data buffer
1283  **/
1284 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1285 						 struct i40e_nvm_access *cmd,
1286 						 u8 *bytes, int *perrno)
1287 {
1288 	struct i40e_asq_cmd_details cmd_details;
1289 	enum i40e_status_code status;
1290 	struct i40e_aq_desc *aq_desc;
1291 	u32 buff_size = 0;
1292 	u8 *buff = NULL;
1293 	u32 aq_desc_len;
1294 	u32 aq_data_len;
1295 
1296 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1297 	memset(&cmd_details, 0, sizeof(cmd_details));
1298 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1299 
1300 	aq_desc_len = sizeof(struct i40e_aq_desc);
1301 	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1302 
1303 	/* get the aq descriptor */
1304 	if (cmd->data_size < aq_desc_len) {
1305 		i40e_debug(hw, I40E_DEBUG_NVM,
1306 			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1307 			   cmd->data_size, aq_desc_len);
1308 		*perrno = -EINVAL;
1309 		return I40E_ERR_PARAM;
1310 	}
1311 	aq_desc = (struct i40e_aq_desc *)bytes;
1312 
1313 	/* if data buffer needed, make sure it's ready */
1314 	aq_data_len = cmd->data_size - aq_desc_len;
1315 	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1316 	if (buff_size) {
1317 		if (!hw->nvm_buff.va) {
1318 			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1319 							hw->aq.asq_buf_size);
1320 			if (status)
1321 				i40e_debug(hw, I40E_DEBUG_NVM,
1322 					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1323 					   status);
1324 		}
1325 
1326 		if (hw->nvm_buff.va) {
1327 			buff = hw->nvm_buff.va;
1328 			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1329 		}
1330 	}
1331 
1332 	/* and away we go! */
1333 	status = i40e_asq_send_command(hw, aq_desc, buff,
1334 				       buff_size, &cmd_details);
1335 	if (status) {
1336 		i40e_debug(hw, I40E_DEBUG_NVM,
1337 			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1338 			   i40e_stat_str(hw, status),
1339 			   i40e_aq_str(hw, hw->aq.asq_last_status));
1340 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1341 	}
1342 
1343 	return status;
1344 }
1345 
1346 /**
1347  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1348  * @hw: pointer to hardware structure
1349  * @cmd: pointer to nvm update command buffer
1350  * @bytes: pointer to the data buffer
1351  * @perrno: pointer to return error code
1352  *
1353  * cmd structure contains identifiers and data buffer
1354  **/
1355 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1356 						    struct i40e_nvm_access *cmd,
1357 						    u8 *bytes, int *perrno)
1358 {
1359 	u32 aq_total_len;
1360 	u32 aq_desc_len;
1361 	int remainder;
1362 	u8 *buff;
1363 
1364 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1365 
1366 	aq_desc_len = sizeof(struct i40e_aq_desc);
1367 	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1368 
1369 	/* check offset range */
1370 	if (cmd->offset > aq_total_len) {
1371 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1372 			   __func__, cmd->offset, aq_total_len);
1373 		*perrno = -EINVAL;
1374 		return I40E_ERR_PARAM;
1375 	}
1376 
1377 	/* check copylength range */
1378 	if (cmd->data_size > (aq_total_len - cmd->offset)) {
1379 		int new_len = aq_total_len - cmd->offset;
1380 
1381 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1382 			   __func__, cmd->data_size, new_len);
1383 		cmd->data_size = new_len;
1384 	}
1385 
1386 	remainder = cmd->data_size;
1387 	if (cmd->offset < aq_desc_len) {
1388 		u32 len = aq_desc_len - cmd->offset;
1389 
1390 		len = min(len, cmd->data_size);
1391 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1392 			   __func__, cmd->offset, cmd->offset + len);
1393 
1394 		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1395 		memcpy(bytes, buff, len);
1396 
1397 		bytes += len;
1398 		remainder -= len;
1399 		buff = hw->nvm_buff.va;
1400 	} else {
1401 		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1402 	}
1403 
1404 	if (remainder > 0) {
1405 		int start_byte = buff - (u8 *)hw->nvm_buff.va;
1406 
1407 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1408 			   __func__, start_byte, start_byte + remainder);
1409 		memcpy(bytes, buff, remainder);
1410 	}
1411 
1412 	return I40E_SUCCESS;
1413 }
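/*
 * For example (illustrative, assuming the usual 32-byte AQ descriptor):
 * with cmd->offset = 16 and cmd->data_size = 40, the code above copies the
 * last 16 bytes of nvm_wb_desc followed by the first 24 bytes of the
 * nvm_buff data buffer into "bytes".
 */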
1414 
1415 /**
1416  * i40e_nvmupd_nvm_read - Read NVM
1417  * @hw: pointer to hardware structure
1418  * @cmd: pointer to nvm update command buffer
1419  * @bytes: pointer to the data buffer
1420  * @perrno: pointer to return error code
1421  *
1422  * cmd structure contains identifiers and data buffer
1423  **/
1424 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1425 						  struct i40e_nvm_access *cmd,
1426 						  u8 *bytes, int *perrno)
1427 {
1428 	struct i40e_asq_cmd_details cmd_details;
1429 	enum i40e_status_code status;
1430 	u8 module, transaction;
1431 	bool last;
1432 
1433 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1434 	module = i40e_nvmupd_get_module(cmd->config);
1435 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1436 
1437 	memset(&cmd_details, 0, sizeof(cmd_details));
1438 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1439 
1440 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1441 				  bytes, last, &cmd_details);
1442 	if (status) {
1443 		i40e_debug(hw, I40E_DEBUG_NVM,
1444 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1445 			   module, cmd->offset, cmd->data_size);
1446 		i40e_debug(hw, I40E_DEBUG_NVM,
1447 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1448 			   status, hw->aq.asq_last_status);
1449 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1450 	}
1451 
1452 	return status;
1453 }
1454 
1455 /**
1456  * i40e_nvmupd_nvm_erase - Erase an NVM module
1457  * @hw: pointer to hardware structure
1458  * @cmd: pointer to nvm update command buffer
1459  * @perrno: pointer to return error code
1460  *
1461  * module, offset, data_size and data are in cmd structure
1462  **/
1463 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1464 						   struct i40e_nvm_access *cmd,
1465 						   int *perrno)
1466 {
1467 	enum i40e_status_code status = I40E_SUCCESS;
1468 	struct i40e_asq_cmd_details cmd_details;
1469 	u8 module, transaction;
1470 	bool last;
1471 
1472 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1473 	module = i40e_nvmupd_get_module(cmd->config);
1474 	last = (transaction & I40E_NVM_LCB);
1475 
1476 	memset(&cmd_details, 0, sizeof(cmd_details));
1477 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1478 
1479 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1480 				   last, &cmd_details);
1481 	if (status) {
1482 		i40e_debug(hw, I40E_DEBUG_NVM,
1483 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1484 			   module, cmd->offset, cmd->data_size);
1485 		i40e_debug(hw, I40E_DEBUG_NVM,
1486 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1487 			   status, hw->aq.asq_last_status);
1488 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1489 	}
1490 
1491 	return status;
1492 }
1493 
1494 /**
1495  * i40e_nvmupd_nvm_write - Write NVM
1496  * @hw: pointer to hardware structure
1497  * @cmd: pointer to nvm update command buffer
1498  * @bytes: pointer to the data buffer
1499  * @perrno: pointer to return error code
1500  *
1501  * module, offset, data_size and data are in cmd structure
1502  **/
1503 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1504 						   struct i40e_nvm_access *cmd,
1505 						   u8 *bytes, int *perrno)
1506 {
1507 	enum i40e_status_code status = I40E_SUCCESS;
1508 	struct i40e_asq_cmd_details cmd_details;
1509 	u8 module, transaction;
1510 	bool last;
1511 
1512 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1513 	module = i40e_nvmupd_get_module(cmd->config);
1514 	last = (transaction & I40E_NVM_LCB);
1515 
1516 	memset(&cmd_details, 0, sizeof(cmd_details));
1517 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1518 
1519 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1520 				    (u16)cmd->data_size, bytes, last,
1521 				    &cmd_details);
1522 	if (status) {
1523 		i40e_debug(hw, I40E_DEBUG_NVM,
1524 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1525 			   module, cmd->offset, cmd->data_size);
1526 		i40e_debug(hw, I40E_DEBUG_NVM,
1527 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1528 			   status, hw->aq.asq_last_status);
1529 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1530 	}
1531 
1532 	return status;
1533 }
1534