xref: /freebsd/sys/dev/ixl/i40e_nvm.c (revision 4f52dfbb8d6c4d446500c5b097e3806ec219fbd4)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "i40e_prototype.h"
36 
37 /**
38  * i40e_init_nvm_ops - Initialize NVM function pointers
39  * @hw: pointer to the HW structure
40  *
41  * Setup the function pointers and the NVM info structure. Should be called
42  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
43  * Please notice that the NVM term is used here (& in all methods covered
44  * in this file) as an equivalent of the FLASH part mapped into the SR.
45  * We are accessing FLASH always through the Shadow RAM.
46  **/
47 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
48 {
49 	struct i40e_nvm_info *nvm = &hw->nvm;
50 	enum i40e_status_code ret_code = I40E_SUCCESS;
51 	u32 fla, gens;
52 	u8 sr_size;
53 
54 	DEBUGFUNC("i40e_init_nvm");
55 
56 	/* The SR size is stored regardless of the nvm programming mode
57 	 * as the blank mode may be used in the factory line.
58 	 */
59 	gens = rd32(hw, I40E_GLNVM_GENS);
60 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
61 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
62 	/* Switching to words (sr_size contains power of 2KB) */
63 	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
64 
65 	/* Check if we are in the normal or blank NVM programming mode */
66 	fla = rd32(hw, I40E_GLNVM_FLA);
67 	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
68 		/* Max NVM timeout */
69 		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
70 		nvm->blank_nvm_mode = FALSE;
71 	} else { /* Blank programming mode */
72 		nvm->blank_nvm_mode = TRUE;
73 		ret_code = I40E_ERR_NVM_BLANK_MODE;
74 		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
75 	}
76 
77 	return ret_code;
78 }
79 
80 /**
81  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
82  * @hw: pointer to the HW structure
83  * @access: NVM access type (read or write)
84  *
85  * This function will request NVM ownership for reading
86  * via the proper Admin Command.
87  **/
88 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
89 				       enum i40e_aq_resource_access_type access)
90 {
91 	enum i40e_status_code ret_code = I40E_SUCCESS;
92 	u64 gtime, timeout;
93 	u64 time_left = 0;
94 
95 	DEBUGFUNC("i40e_acquire_nvm");
96 
97 	if (hw->nvm.blank_nvm_mode)
98 		goto i40e_i40e_acquire_nvm_exit;
99 
100 	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
101 					    0, &time_left, NULL);
102 	/* Reading the Global Device Timer */
103 	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
104 
105 	/* Store the timeout */
106 	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
107 
108 	if (ret_code)
109 		i40e_debug(hw, I40E_DEBUG_NVM,
110 			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
111 			   access, time_left, ret_code, hw->aq.asq_last_status);
112 
113 	if (ret_code && time_left) {
114 		/* Poll until the current NVM owner timeouts */
115 		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
116 		while ((gtime < timeout) && time_left) {
117 			i40e_msec_delay(10);
118 			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
119 			ret_code = i40e_aq_request_resource(hw,
120 							I40E_NVM_RESOURCE_ID,
121 							access, 0, &time_left,
122 							NULL);
123 			if (ret_code == I40E_SUCCESS) {
124 				hw->nvm.hw_semaphore_timeout =
125 					    I40E_MS_TO_GTIME(time_left) + gtime;
126 				break;
127 			}
128 		}
129 		if (ret_code != I40E_SUCCESS) {
130 			hw->nvm.hw_semaphore_timeout = 0;
131 			i40e_debug(hw, I40E_DEBUG_NVM,
132 				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
133 				   time_left, ret_code, hw->aq.asq_last_status);
134 		}
135 	}
136 
137 i40e_i40e_acquire_nvm_exit:
138 	return ret_code;
139 }
140 
141 /**
142  * i40e_release_nvm - Generic request for releasing the NVM ownership
143  * @hw: pointer to the HW structure
144  *
145  * This function will release NVM resource via the proper Admin Command.
146  **/
147 void i40e_release_nvm(struct i40e_hw *hw)
148 {
149 	enum i40e_status_code ret_code = I40E_SUCCESS;
150 	u32 total_delay = 0;
151 
152 	DEBUGFUNC("i40e_release_nvm");
153 
154 	if (hw->nvm.blank_nvm_mode)
155 		return;
156 
157 	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
158 
159 	/* there are some rare cases when trying to release the resource
160 	 * results in an admin Q timeout, so handle them correctly
161 	 */
162 	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
163 	       (total_delay < hw->aq.asq_cmd_timeout)) {
164 			i40e_msec_delay(1);
165 			ret_code = i40e_aq_release_resource(hw,
166 						I40E_NVM_RESOURCE_ID, 0, NULL);
167 			total_delay++;
168 	}
169 }
170 
171 /**
172  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
173  * @hw: pointer to the HW structure
174  *
175  * Polls the SRCTL Shadow RAM register done bit.
176  **/
177 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
178 {
179 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
180 	u32 srctl, wait_cnt;
181 
182 	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
183 
184 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
185 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
186 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
187 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
188 			ret_code = I40E_SUCCESS;
189 			break;
190 		}
191 		i40e_usec_delay(5);
192 	}
193 	if (ret_code == I40E_ERR_TIMEOUT)
194 		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
195 	return ret_code;
196 }
197 
198 /**
199  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
200  * @hw: pointer to the HW structure
201  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
202  * @data: word read from the Shadow RAM
203  *
204  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
205  **/
206 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
207 					       u16 *data)
208 {
209 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
210 	u32 sr_reg;
211 
212 	DEBUGFUNC("i40e_read_nvm_word_srctl");
213 
214 	if (offset >= hw->nvm.sr_size) {
215 		i40e_debug(hw, I40E_DEBUG_NVM,
216 			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
217 			   offset, hw->nvm.sr_size);
218 		ret_code = I40E_ERR_PARAM;
219 		goto read_nvm_exit;
220 	}
221 
222 	/* Poll the done bit first */
223 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
224 	if (ret_code == I40E_SUCCESS) {
225 		/* Write the address and start reading */
226 		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
227 			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
228 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
229 
230 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
231 		ret_code = i40e_poll_sr_srctl_done_bit(hw);
232 		if (ret_code == I40E_SUCCESS) {
233 			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
234 			*data = (u16)((sr_reg &
235 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
236 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
237 		}
238 	}
239 	if (ret_code != I40E_SUCCESS)
240 		i40e_debug(hw, I40E_DEBUG_NVM,
241 			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
242 			   offset);
243 
244 read_nvm_exit:
245 	return ret_code;
246 }
247 
248 /**
249  * i40e_read_nvm_aq - Read Shadow RAM.
250  * @hw: pointer to the HW structure.
251  * @module_pointer: module pointer location in words from the NVM beginning
252  * @offset: offset in words from module start
253  * @words: number of words to write
254  * @data: buffer with words to write to the Shadow RAM
255  * @last_command: tells the AdminQ that this is the last command
256  *
257  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
258  **/
259 static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
260 					      u8 module_pointer, u32 offset,
261 					      u16 words, void *data,
262 					      bool last_command)
263 {
264 	enum i40e_status_code ret_code = I40E_ERR_NVM;
265 	struct i40e_asq_cmd_details cmd_details;
266 
267 	DEBUGFUNC("i40e_read_nvm_aq");
268 
269 	memset(&cmd_details, 0, sizeof(cmd_details));
270 	cmd_details.wb_desc = &hw->nvm_wb_desc;
271 
272 	/* Here we are checking the SR limit only for the flat memory model.
273 	 * We cannot do it for the module-based model, as we did not acquire
274 	 * the NVM resource yet (we cannot get the module pointer value).
275 	 * Firmware will check the module-based model.
276 	 */
277 	if ((offset + words) > hw->nvm.sr_size)
278 		i40e_debug(hw, I40E_DEBUG_NVM,
279 			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
280 			   (offset + words), hw->nvm.sr_size);
281 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
282 		/* We can write only up to 4KB (one sector), in one AQ write */
283 		i40e_debug(hw, I40E_DEBUG_NVM,
284 			   "NVM write fail error: tried to write %d words, limit is %d.\n",
285 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
286 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
287 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
288 		/* A single write cannot spread over two sectors */
289 		i40e_debug(hw, I40E_DEBUG_NVM,
290 			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
291 			   offset, words);
292 	else
293 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
294 					    2 * offset,  /*bytes*/
295 					    2 * words,   /*bytes*/
296 					    data, last_command, &cmd_details);
297 
298 	return ret_code;
299 }
300 
301 /**
302  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
303  * @hw: pointer to the HW structure
304  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
305  * @data: word read from the Shadow RAM
306  *
307  * Reads one 16 bit word from the Shadow RAM using the AdminQ
308  **/
309 static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
310 						   u16 *data)
311 {
312 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
313 
314 	DEBUGFUNC("i40e_read_nvm_word_aq");
315 
316 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
317 	*data = LE16_TO_CPU(*(__le16 *)data);
318 
319 	return ret_code;
320 }
321 
322 /**
323  * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
324  * @hw: pointer to the HW structure
325  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
326  * @data: word read from the Shadow RAM
327  *
328  * Reads one 16 bit word from the Shadow RAM.
329  *
330  * Do not use this function except in cases where the nvm lock is already
331  * taken via i40e_acquire_nvm().
332  **/
333 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
334 					   u16 offset,
335 					   u16 *data)
336 {
337 
338 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
339 		return i40e_read_nvm_word_aq(hw, offset, data);
340 
341 	return i40e_read_nvm_word_srctl(hw, offset, data);
342 }
343 
344 /**
345  * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
346  * @hw: pointer to the HW structure
347  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
348  * @data: word read from the Shadow RAM
349  *
350  * Reads one 16 bit word from the Shadow RAM.
351  **/
352 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
353 					 u16 *data)
354 {
355 	enum i40e_status_code ret_code = I40E_SUCCESS;
356 
357 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
358 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
359 
360 	if (ret_code)
361 		return ret_code;
362 	ret_code = __i40e_read_nvm_word(hw, offset, data);
363 
364 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
365 		i40e_release_nvm(hw);
366 	return ret_code;
367 }
368 
369 /**
370  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
371  * @hw: pointer to the HW structure
372  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
373  * @words: (in) number of words to read; (out) number of words actually read
374  * @data: words read from the Shadow RAM
375  *
376  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
377  * method. The buffer read is preceded by the NVM ownership take
378  * and followed by the release.
379  **/
380 static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
381 							u16 *words, u16 *data)
382 {
383 	enum i40e_status_code ret_code = I40E_SUCCESS;
384 	u16 index, word;
385 
386 	DEBUGFUNC("i40e_read_nvm_buffer_srctl");
387 
388 	/* Loop through the selected region */
389 	for (word = 0; word < *words; word++) {
390 		index = offset + word;
391 		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
392 		if (ret_code != I40E_SUCCESS)
393 			break;
394 	}
395 
396 	/* Update the number of words read from the Shadow RAM */
397 	*words = word;
398 
399 	return ret_code;
400 }
401 
402 /**
403  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
404  * @hw: pointer to the HW structure
405  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
406  * @words: (in) number of words to read; (out) number of words actually read
407  * @data: words read from the Shadow RAM
408  *
409  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
410  * method. The buffer read is preceded by the NVM ownership take
411  * and followed by the release.
412  **/
413 static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
414 						     u16 *words, u16 *data)
415 {
416 	enum i40e_status_code ret_code;
417 	u16 read_size = *words;
418 	bool last_cmd = FALSE;
419 	u16 words_read = 0;
420 	u16 i = 0;
421 
422 	DEBUGFUNC("i40e_read_nvm_buffer_aq");
423 
424 	do {
425 		/* Calculate number of bytes we should read in this step.
426 		 * FVL AQ do not allow to read more than one page at a time or
427 		 * to cross page boundaries.
428 		 */
429 		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
430 			read_size = min(*words,
431 					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
432 				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
433 		else
434 			read_size = min((*words - words_read),
435 					I40E_SR_SECTOR_SIZE_IN_WORDS);
436 
437 		/* Check if this is last command, if so set proper flag */
438 		if ((words_read + read_size) >= *words)
439 			last_cmd = TRUE;
440 
441 		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
442 					    data + words_read, last_cmd);
443 		if (ret_code != I40E_SUCCESS)
444 			goto read_nvm_buffer_aq_exit;
445 
446 		/* Increment counter for words already read and move offset to
447 		 * new read location
448 		 */
449 		words_read += read_size;
450 		offset += read_size;
451 	} while (words_read < *words);
452 
453 	for (i = 0; i < *words; i++)
454 		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
455 
456 read_nvm_buffer_aq_exit:
457 	*words = words_read;
458 	return ret_code;
459 }
460 
461 /**
462  * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
463  * @hw: pointer to the HW structure
464  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
465  * @words: (in) number of words to read; (out) number of words actually read
466  * @data: words read from the Shadow RAM
467  *
468  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
469  * method.
470  **/
471 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
472 					     u16 offset,
473 					     u16 *words, u16 *data)
474 {
475 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
476 		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
477 
478 	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
479 }
480 
481 /**
482  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
483  * @hw: pointer to the HW structure
484  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
485  * @words: (in) number of words to read; (out) number of words actually read
486  * @data: words read from the Shadow RAM
487  *
488  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
489  * method. The buffer read is preceded by the NVM ownership take
490  * and followed by the release.
491  **/
492 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
493 					   u16 *words, u16 *data)
494 {
495 	enum i40e_status_code ret_code = I40E_SUCCESS;
496 
497 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
498 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
499 		if (!ret_code) {
500 			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
501 							 data);
502 			i40e_release_nvm(hw);
503 		}
504 	} else {
505 		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
506 	}
507 	return ret_code;
508 }
509 
510 
511 /**
512  * i40e_write_nvm_aq - Writes Shadow RAM.
513  * @hw: pointer to the HW structure.
514  * @module_pointer: module pointer location in words from the NVM beginning
515  * @offset: offset in words from module start
516  * @words: number of words to write
517  * @data: buffer with words to write to the Shadow RAM
518  * @last_command: tells the AdminQ that this is the last command
519  *
520  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
521  **/
522 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
523 					u32 offset, u16 words, void *data,
524 					bool last_command)
525 {
526 	enum i40e_status_code ret_code = I40E_ERR_NVM;
527 	struct i40e_asq_cmd_details cmd_details;
528 
529 	DEBUGFUNC("i40e_write_nvm_aq");
530 
531 	memset(&cmd_details, 0, sizeof(cmd_details));
532 	cmd_details.wb_desc = &hw->nvm_wb_desc;
533 
534 	/* Here we are checking the SR limit only for the flat memory model.
535 	 * We cannot do it for the module-based model, as we did not acquire
536 	 * the NVM resource yet (we cannot get the module pointer value).
537 	 * Firmware will check the module-based model.
538 	 */
539 	if ((offset + words) > hw->nvm.sr_size)
540 		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
541 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
542 		/* We can write only up to 4KB (one sector), in one AQ write */
543 		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
544 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
545 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
546 		/* A single write cannot spread over two sectors */
547 		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
548 	else
549 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
550 					      2 * offset,  /*bytes*/
551 					      2 * words,   /*bytes*/
552 					      data, last_command, 0,
553 					      &cmd_details);
554 
555 	return ret_code;
556 }
557 
558 /**
559  * __i40e_write_nvm_word - Writes Shadow RAM word
560  * @hw: pointer to the HW structure
561  * @offset: offset of the Shadow RAM word to write
562  * @data: word to write to the Shadow RAM
563  *
564  * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
565  * NVM ownership have to be acquired and released (on ARQ completion event
566  * reception) by caller. To commit SR to NVM update checksum function
567  * should be called.
568  **/
569 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
570 					    void *data)
571 {
572 	DEBUGFUNC("i40e_write_nvm_word");
573 
574 	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
575 
576 	/* Value 0x00 below means that we treat SR as a flat mem */
577 	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
578 }
579 
580 /**
581  * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
582  * @hw: pointer to the HW structure
583  * @module_pointer: module pointer location in words from the NVM beginning
584  * @offset: offset of the Shadow RAM buffer to write
585  * @words: number of words to write
586  * @data: words to write to the Shadow RAM
587  *
588  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
589  * NVM ownership must be acquired before calling this function and released
590  * on ARQ completion event reception by caller. To commit SR to NVM update
591  * checksum function should be called.
592  **/
593 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
594 					      u8 module_pointer, u32 offset,
595 					      u16 words, void *data)
596 {
597 	__le16 *le_word_ptr = (__le16 *)data;
598 	u16 *word_ptr = (u16 *)data;
599 	u32 i = 0;
600 
601 	DEBUGFUNC("i40e_write_nvm_buffer");
602 
603 	for (i = 0; i < words; i++)
604 		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
605 
606 	/* Here we will only write one buffer as the size of the modules
607 	 * mirrored in the Shadow RAM is always less than 4K.
608 	 */
609 	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
610 				 data, FALSE);
611 }
612 
613 /**
614  * i40e_calc_nvm_checksum - Calculates and returns the checksum
615  * @hw: pointer to hardware structure
616  * @checksum: pointer to the checksum
617  *
618  * This function calculates SW Checksum that covers the whole 64kB shadow RAM
619  * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
620  * is customer specific and unknown. Therefore, this function skips all maximum
621  * possible size of VPD (1kB).
622  **/
623 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
624 {
625 	enum i40e_status_code ret_code = I40E_SUCCESS;
626 	struct i40e_virt_mem vmem;
627 	u16 pcie_alt_module = 0;
628 	u16 checksum_local = 0;
629 	u16 vpd_module = 0;
630 	u16 *data;
631 	u16 i = 0;
632 
633 	DEBUGFUNC("i40e_calc_nvm_checksum");
634 
635 	ret_code = i40e_allocate_virt_mem(hw, &vmem,
636 				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
637 	if (ret_code)
638 		goto i40e_calc_nvm_checksum_exit;
639 	data = (u16 *)vmem.va;
640 
641 	/* read pointer to VPD area */
642 	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
643 	if (ret_code != I40E_SUCCESS) {
644 		ret_code = I40E_ERR_NVM_CHECKSUM;
645 		goto i40e_calc_nvm_checksum_exit;
646 	}
647 
648 	/* read pointer to PCIe Alt Auto-load module */
649 	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
650 					&pcie_alt_module);
651 	if (ret_code != I40E_SUCCESS) {
652 		ret_code = I40E_ERR_NVM_CHECKSUM;
653 		goto i40e_calc_nvm_checksum_exit;
654 	}
655 
656 	/* Calculate SW checksum that covers the whole 64kB shadow RAM
657 	 * except the VPD and PCIe ALT Auto-load modules
658 	 */
659 	for (i = 0; i < hw->nvm.sr_size; i++) {
660 		/* Read SR page */
661 		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
662 			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
663 
664 			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
665 			if (ret_code != I40E_SUCCESS) {
666 				ret_code = I40E_ERR_NVM_CHECKSUM;
667 				goto i40e_calc_nvm_checksum_exit;
668 			}
669 		}
670 
671 		/* Skip Checksum word */
672 		if (i == I40E_SR_SW_CHECKSUM_WORD)
673 			continue;
674 		/* Skip VPD module (convert byte size to word count) */
675 		if ((i >= (u32)vpd_module) &&
676 		    (i < ((u32)vpd_module +
677 		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
678 			continue;
679 		}
680 		/* Skip PCIe ALT module (convert byte size to word count) */
681 		if ((i >= (u32)pcie_alt_module) &&
682 		    (i < ((u32)pcie_alt_module +
683 		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
684 			continue;
685 		}
686 
687 		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
688 	}
689 
690 	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
691 
692 i40e_calc_nvm_checksum_exit:
693 	i40e_free_virt_mem(hw, &vmem);
694 	return ret_code;
695 }
696 
697 /**
698  * i40e_update_nvm_checksum - Updates the NVM checksum
699  * @hw: pointer to hardware structure
700  *
701  * NVM ownership must be acquired before calling this function and released
702  * on ARQ completion event reception by caller.
703  * This function will commit SR to NVM.
704  **/
705 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
706 {
707 	enum i40e_status_code ret_code = I40E_SUCCESS;
708 	u16 checksum;
709 	__le16 le_sum;
710 
711 	DEBUGFUNC("i40e_update_nvm_checksum");
712 
713 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
714 	le_sum = CPU_TO_LE16(checksum);
715 	if (ret_code == I40E_SUCCESS)
716 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
717 					     1, &le_sum, TRUE);
718 
719 	return ret_code;
720 }
721 
722 /**
723  * i40e_validate_nvm_checksum - Validate EEPROM checksum
724  * @hw: pointer to hardware structure
725  * @checksum: calculated checksum
726  *
727  * Performs checksum calculation and validates the NVM SW checksum. If the
728  * caller does not need checksum, the value can be NULL.
729  **/
730 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
731 						 u16 *checksum)
732 {
733 	enum i40e_status_code ret_code = I40E_SUCCESS;
734 	u16 checksum_sr = 0;
735 	u16 checksum_local = 0;
736 
737 	DEBUGFUNC("i40e_validate_nvm_checksum");
738 
739 	/* We must acquire the NVM lock in order to correctly synchronize the
740 	 * NVM accesses across multiple PFs. Without doing so it is possible
741 	 * for one of the PFs to read invalid data potentially indicating that
742 	 * the checksum is invalid.
743 	 */
744 	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
745 	if (ret_code)
746 		return ret_code;
747 	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
748 	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
749 	i40e_release_nvm(hw);
750 	if (ret_code)
751 		return ret_code;
752 
753 	/* Verify read checksum from EEPROM is the same as
754 	 * calculated checksum
755 	 */
756 	if (checksum_local != checksum_sr)
757 		ret_code = I40E_ERR_NVM_CHECKSUM;
758 
759 	/* If the user cares, return the calculated checksum */
760 	if (checksum)
761 		*checksum = checksum_local;
762 
763 	return ret_code;
764 }
765 
766 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
767 						    struct i40e_nvm_access *cmd,
768 						    u8 *bytes, int *perrno);
769 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
770 						    struct i40e_nvm_access *cmd,
771 						    u8 *bytes, int *perrno);
772 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
773 						    struct i40e_nvm_access *cmd,
774 						    u8 *bytes, int *perrno);
775 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
776 						    struct i40e_nvm_access *cmd,
777 						    int *perrno);
778 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
779 						   struct i40e_nvm_access *cmd,
780 						   int *perrno);
781 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
782 						   struct i40e_nvm_access *cmd,
783 						   u8 *bytes, int *perrno);
784 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
785 						  struct i40e_nvm_access *cmd,
786 						  u8 *bytes, int *perrno);
787 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
788 						 struct i40e_nvm_access *cmd,
789 						 u8 *bytes, int *perrno);
790 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
791 						    struct i40e_nvm_access *cmd,
792 						    u8 *bytes, int *perrno);
793 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
794 						    struct i40e_nvm_access *cmd,
795 						    u8 *bytes, int *perrno);
796 static INLINE u8 i40e_nvmupd_get_module(u32 val)
797 {
798 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
799 }
800 static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
801 {
802 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
803 }
804 
805 static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
806 {
807 	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
808 		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
809 }
810 
/* Human-readable names for NVM update commands, used in debug logging.
 * NOTE(review): entries are indexed by the i40e_nvmupd_cmd enum value and
 * must stay in the same order as that enum — verify when adding commands.
 */
static const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
830 
831 /**
832  * i40e_nvmupd_command - Process an NVM update command
833  * @hw: pointer to hardware structure
834  * @cmd: pointer to nvm update command
835  * @bytes: pointer to the data buffer
836  * @perrno: pointer to return error code
837  *
838  * Dispatches command depending on what update state is current
839  **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* an invalid command is only logged here; it deliberately falls
	 * through to the state machine below, where each state's default
	 * case rejects it and overwrites *perrno with -ESRCH
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		/* need at least one byte to report the current state */
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		/* with four or more bytes, also report the AQ opcode the
		 * state machine is currently waiting on
		 */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return I40E_SUCCESS;
	}

	/* Clear the error status even if it was not read, and log it */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = I40E_SUCCESS;
			break;
		}

		/* still waiting on the completion event; reject the command */
		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return status;
}
947 
948 /**
949  * i40e_nvmupd_state_init - Handle NVM update state Init
950  * @hw: pointer to hardware structure
951  * @cmd: pointer to nvm update command buffer
952  * @bytes: pointer to the data buffer
953  * @perrno: pointer to return error code
954  *
955  * Process legitimate commands of the Init state and conditionally set next
956  * state. Reject all other commands.
957  **/
static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* standalone read: acquire, read, and release in one step */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* first read of a sequence: keep NVM ownership on success
		 * and move to the Reading state for the follow-on reads
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase: wait for the erase completion event; ownership is
		 * released only when that event arrives (release_on_done)
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* standalone write: like erase, but waits on nvm_update */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* first write of a sequence: hold ownership across the
		 * wait, then continue the sequence in the Writing state
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update, then wait for completion */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				/* map the AQ error when one was reported,
				 * otherwise fall back to -EIO
				 */
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		/* pass-through AdminQ command from the update tool */
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		/* fetch the writeback result of a previous EXEC_AQ */
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		/* fetch the completion event descriptor of an EXEC_AQ */
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1088 
1089 /**
1090  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1091  * @hw: pointer to hardware structure
1092  * @cmd: pointer to nvm update command buffer
1093  * @bytes: pointer to the data buffer
1094  * @perrno: pointer to return error code
1095  *
1096  * NVM ownership is already held.  Process legitimate commands and set any
1097  * change in state; reject all other commands.
1098  **/
1099 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1100 						    struct i40e_nvm_access *cmd,
1101 						    u8 *bytes, int *perrno)
1102 {
1103 	enum i40e_status_code status = I40E_SUCCESS;
1104 	enum i40e_nvmupd_cmd upd_cmd;
1105 
1106 	DEBUGFUNC("i40e_nvmupd_state_reading");
1107 
1108 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1109 
1110 	switch (upd_cmd) {
1111 	case I40E_NVMUPD_READ_SA:
1112 	case I40E_NVMUPD_READ_CON:
1113 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1114 		break;
1115 
1116 	case I40E_NVMUPD_READ_LCB:
1117 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1118 		i40e_release_nvm(hw);
1119 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1120 		break;
1121 
1122 	default:
1123 		i40e_debug(hw, I40E_DEBUG_NVM,
1124 			   "NVMUPD: bad cmd %s in reading state.\n",
1125 			   i40e_nvm_update_state_str[upd_cmd]);
1126 		status = I40E_NOT_SUPPORTED;
1127 		*perrno = -ESRCH;
1128 		break;
1129 	}
1130 	return status;
1131 }
1132 
1133 /**
1134  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1135  * @hw: pointer to hardware structure
1136  * @cmd: pointer to nvm update command buffer
1137  * @bytes: pointer to the data buffer
1138  * @perrno: pointer to return error code
1139  *
1140  * NVM ownership is already held.  Process legitimate commands and set any
1141  * change in state; reject all other commands
1142  **/
static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = FALSE;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* mid-sequence write: wait for its completion event, then
		 * come back through Writing for the next chunk
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last write of the sequence: on success, NVM ownership is
		 * released when the completion event arrives; on failure,
		 * return straight to Init
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			/* prefer the AQ error mapping; -EIO when the AQ
			 * reported no specific error
			 */
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = TRUE;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = TRUE;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore.  If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		/* preserve the original failure so it can be restored if
		 * the semaphore reacquire itself fails
		 */
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = TRUE;
				goto retry;
			}
		}
	}

	return status;
}
1254 
1255 /**
1256  * i40e_nvmupd_clear_wait_state - clear wait state on hw
1257  * @hw: pointer to the hardware structure
1258  **/
1259 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1260 {
1261 	i40e_debug(hw, I40E_DEBUG_NVM,
1262 		   "NVMUPD: clearing wait on opcode 0x%04x\n",
1263 		   hw->nvm_wait_opcode);
1264 
1265 	if (hw->nvm_release_on_done) {
1266 		i40e_release_nvm(hw);
1267 		hw->nvm_release_on_done = FALSE;
1268 	}
1269 	hw->nvm_wait_opcode = 0;
1270 
1271 	if (hw->aq.arq_last_status) {
1272 		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1273 		return;
1274 	}
1275 
1276 	switch (hw->nvmupd_state) {
1277 	case I40E_NVMUPD_STATE_INIT_WAIT:
1278 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1279 		break;
1280 
1281 	case I40E_NVMUPD_STATE_WRITE_WAIT:
1282 		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1283 		break;
1284 
1285 	default:
1286 		break;
1287 	}
1288 }
1289 
1290 /**
1291  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1292  * @hw: pointer to the hardware structure
1293  * @opcode: the event that just happened
1294  * @desc: AdminQ descriptor
1295  **/
1296 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1297 				  struct i40e_aq_desc *desc)
1298 {
1299 	u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1300 
1301 	if (opcode == hw->nvm_wait_opcode) {
1302 		i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1303 			    aq_desc_len, I40E_NONDMA_TO_NONDMA);
1304 		i40e_nvmupd_clear_wait_state(hw);
1305 	}
1306 }
1307 
1308 /**
1309  * i40e_nvmupd_validate_command - Validate given command
1310  * @hw: pointer to hardware structure
1311  * @cmd: pointer to nvm update command buffer
1312  * @perrno: pointer to return error code
1313  *
1314  * Return one of the valid command types or I40E_NVMUPD_INVALID
1315  **/
1316 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1317 						    struct i40e_nvm_access *cmd,
1318 						    int *perrno)
1319 {
1320 	enum i40e_nvmupd_cmd upd_cmd;
1321 	u8 module, transaction;
1322 
1323 	DEBUGFUNC("i40e_nvmupd_validate_command\n");
1324 
1325 	/* anything that doesn't match a recognized case is an error */
1326 	upd_cmd = I40E_NVMUPD_INVALID;
1327 
1328 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1329 	module = i40e_nvmupd_get_module(cmd->config);
1330 
1331 	/* limits on data size */
1332 	if ((cmd->data_size < 1) ||
1333 	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1334 		i40e_debug(hw, I40E_DEBUG_NVM,
1335 			   "i40e_nvmupd_validate_command data_size %d\n",
1336 			   cmd->data_size);
1337 		*perrno = -EFAULT;
1338 		return I40E_NVMUPD_INVALID;
1339 	}
1340 
1341 	switch (cmd->command) {
1342 	case I40E_NVM_READ:
1343 		switch (transaction) {
1344 		case I40E_NVM_CON:
1345 			upd_cmd = I40E_NVMUPD_READ_CON;
1346 			break;
1347 		case I40E_NVM_SNT:
1348 			upd_cmd = I40E_NVMUPD_READ_SNT;
1349 			break;
1350 		case I40E_NVM_LCB:
1351 			upd_cmd = I40E_NVMUPD_READ_LCB;
1352 			break;
1353 		case I40E_NVM_SA:
1354 			upd_cmd = I40E_NVMUPD_READ_SA;
1355 			break;
1356 		case I40E_NVM_EXEC:
1357 			if (module == 0xf)
1358 				upd_cmd = I40E_NVMUPD_STATUS;
1359 			else if (module == 0)
1360 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1361 			break;
1362 		case I40E_NVM_AQE:
1363 			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1364 			break;
1365 		}
1366 		break;
1367 
1368 	case I40E_NVM_WRITE:
1369 		switch (transaction) {
1370 		case I40E_NVM_CON:
1371 			upd_cmd = I40E_NVMUPD_WRITE_CON;
1372 			break;
1373 		case I40E_NVM_SNT:
1374 			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1375 			break;
1376 		case I40E_NVM_LCB:
1377 			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1378 			break;
1379 		case I40E_NVM_SA:
1380 			upd_cmd = I40E_NVMUPD_WRITE_SA;
1381 			break;
1382 		case I40E_NVM_ERA:
1383 			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1384 			break;
1385 		case I40E_NVM_CSUM:
1386 			upd_cmd = I40E_NVMUPD_CSUM_CON;
1387 			break;
1388 		case (I40E_NVM_CSUM|I40E_NVM_SA):
1389 			upd_cmd = I40E_NVMUPD_CSUM_SA;
1390 			break;
1391 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1392 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1393 			break;
1394 		case I40E_NVM_EXEC:
1395 			if (module == 0)
1396 				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1397 			break;
1398 		}
1399 		break;
1400 	}
1401 
1402 	return upd_cmd;
1403 }
1404 
1405 /**
1406  * i40e_nvmupd_exec_aq - Run an AQ command
1407  * @hw: pointer to hardware structure
1408  * @cmd: pointer to nvm update command buffer
1409  * @bytes: pointer to the data buffer
1410  * @perrno: pointer to return error code
1411  *
1412  * cmd structure contains identifiers and data buffer
1413  **/
static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* offset 0xffff is the "clear wait state" marker consumed by
	 * i40e_nvmupd_command; treat it as a no-op here
	 */
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor: the caller's buffer must begin with a
	 * full AQ descriptor, optionally followed by indirect data
	 */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready: size the buffer by
	 * the larger of the supplied data and the descriptor's datalen
	 */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		/* the scratch buffer is allocated lazily, once, at the
		 * ASQ buffer size, and reused for later commands
		 */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				I40E_NONDMA_TO_NONDMA);
		}
	}

	/* a nonzero offset carries the opcode of the completion event to
	 * wait for; clear the stale event descriptor before sending
	 */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1489 
1490 /**
1491  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1492  * @hw: pointer to hardware structure
1493  * @cmd: pointer to nvm update command buffer
1494  * @bytes: pointer to the data buffer
1495  * @perrno: pointer to return error code
1496  *
1497  * cmd structure contains identifiers and data buffer
1498  **/
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	/* the result is viewed as the writeback descriptor immediately
	 * followed by its indirect data; cmd->offset indexes into that
	 * concatenation
	 */
	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; silently trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* first copy the requested slice of the descriptor itself */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		/* any remaining bytes come from the start of the data buf */
		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* offset lands entirely inside the data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1558 
1559 /**
1560  * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1561  * @hw: pointer to hardware structure
1562  * @cmd: pointer to nvm update command buffer
1563  * @bytes: pointer to the data buffer
1564  * @perrno: pointer to return error code
1565  *
1566  * cmd structure contains identifiers and data buffer
1567  **/
1568 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1569 						    struct i40e_nvm_access *cmd,
1570 						    u8 *bytes, int *perrno)
1571 {
1572 	u32 aq_total_len;
1573 	u32 aq_desc_len;
1574 
1575 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1576 
1577 	aq_desc_len = sizeof(struct i40e_aq_desc);
1578 	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1579 
1580 	/* check copylength range */
1581 	if (cmd->data_size > aq_total_len) {
1582 		i40e_debug(hw, I40E_DEBUG_NVM,
1583 			   "%s: copy length %d too big, trimming to %d\n",
1584 			   __func__, cmd->data_size, aq_total_len);
1585 		cmd->data_size = aq_total_len;
1586 	}
1587 
1588 	i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1589 		    I40E_NONDMA_TO_NONDMA);
1590 
1591 	return I40E_SUCCESS;
1592 }
1593 
1594 /**
1595  * i40e_nvmupd_nvm_read - Read NVM
1596  * @hw: pointer to hardware structure
1597  * @cmd: pointer to nvm update command buffer
1598  * @bytes: pointer to the data buffer
1599  * @perrno: pointer to return error code
1600  *
1601  * cmd structure contains identifiers and data buffer
1602  **/
1603 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1604 						  struct i40e_nvm_access *cmd,
1605 						  u8 *bytes, int *perrno)
1606 {
1607 	struct i40e_asq_cmd_details cmd_details;
1608 	enum i40e_status_code status;
1609 	u8 module, transaction;
1610 	bool last;
1611 
1612 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1613 	module = i40e_nvmupd_get_module(cmd->config);
1614 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1615 
1616 	memset(&cmd_details, 0, sizeof(cmd_details));
1617 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1618 
1619 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1620 				  bytes, last, &cmd_details);
1621 	if (status) {
1622 		i40e_debug(hw, I40E_DEBUG_NVM,
1623 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1624 			   module, cmd->offset, cmd->data_size);
1625 		i40e_debug(hw, I40E_DEBUG_NVM,
1626 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1627 			   status, hw->aq.asq_last_status);
1628 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1629 	}
1630 
1631 	return status;
1632 }
1633 
1634 /**
1635  * i40e_nvmupd_nvm_erase - Erase an NVM module
1636  * @hw: pointer to hardware structure
1637  * @cmd: pointer to nvm update command buffer
1638  * @perrno: pointer to return error code
1639  *
1640  * module, offset, data_size and data are in cmd structure
1641  **/
1642 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1643 						   struct i40e_nvm_access *cmd,
1644 						   int *perrno)
1645 {
1646 	enum i40e_status_code status = I40E_SUCCESS;
1647 	struct i40e_asq_cmd_details cmd_details;
1648 	u8 module, transaction;
1649 	bool last;
1650 
1651 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1652 	module = i40e_nvmupd_get_module(cmd->config);
1653 	last = (transaction & I40E_NVM_LCB);
1654 
1655 	memset(&cmd_details, 0, sizeof(cmd_details));
1656 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1657 
1658 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1659 				   last, &cmd_details);
1660 	if (status) {
1661 		i40e_debug(hw, I40E_DEBUG_NVM,
1662 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1663 			   module, cmd->offset, cmd->data_size);
1664 		i40e_debug(hw, I40E_DEBUG_NVM,
1665 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1666 			   status, hw->aq.asq_last_status);
1667 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1668 	}
1669 
1670 	return status;
1671 }
1672 
1673 /**
1674  * i40e_nvmupd_nvm_write - Write NVM
1675  * @hw: pointer to hardware structure
1676  * @cmd: pointer to nvm update command buffer
1677  * @bytes: pointer to the data buffer
1678  * @perrno: pointer to return error code
1679  *
1680  * module, offset, data_size and data are in cmd structure
1681  **/
1682 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1683 						   struct i40e_nvm_access *cmd,
1684 						   u8 *bytes, int *perrno)
1685 {
1686 	enum i40e_status_code status = I40E_SUCCESS;
1687 	struct i40e_asq_cmd_details cmd_details;
1688 	u8 module, transaction;
1689 	u8 preservation_flags;
1690 	bool last;
1691 
1692 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1693 	module = i40e_nvmupd_get_module(cmd->config);
1694 	last = (transaction & I40E_NVM_LCB);
1695 	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1696 
1697 	memset(&cmd_details, 0, sizeof(cmd_details));
1698 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1699 
1700 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1701 				    (u16)cmd->data_size, bytes, last,
1702 				    preservation_flags, &cmd_details);
1703 	if (status) {
1704 		i40e_debug(hw, I40E_DEBUG_NVM,
1705 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1706 			   module, cmd->offset, cmd->data_size);
1707 		i40e_debug(hw, I40E_DEBUG_NVM,
1708 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1709 			   status, hw->aq.asq_last_status);
1710 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1711 	}
1712 
1713 	return status;
1714 }
1715