xref: /freebsd/sys/dev/ixl/i40e_nvm.c (revision 223d846d93fe4f04cae073df1bee6554b35a3dc7)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "i40e_prototype.h"
36 
37 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 					       u16 *data);
39 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 					    u16 *data);
41 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
42 						 u16 *words, u16 *data);
43 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
44 					      u16 *words, u16 *data);
45 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
46 				       u32 offset, u16 words, void *data,
47 				       bool last_command);
48 
49 /**
50  * i40e_init_nvm - Initialize the NVM info structure
51  * @hw: pointer to the HW structure
52  *
53  * Sets up the NVM info structure. Should be called once per NVM
54  * initialization, e.g. inside i40e_init_shared_code().
55  * Note that the NVM term is used here (and in all methods covered in this
56  * file) as an equivalent of the FLASH part mapped into the SR.
57  * The FLASH is always accessed through the Shadow RAM.
58  **/
59 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 {
61 	struct i40e_nvm_info *nvm = &hw->nvm;
62 	enum i40e_status_code ret_code = I40E_SUCCESS;
63 	u32 fla, gens;
64 	u8 sr_size;
65 
66 	DEBUGFUNC("i40e_init_nvm");
67 
68 	/* The SR size is stored regardless of the NVM programming mode,
69 	 * as the blank mode may be used on the factory line.
70 	 */
71 	gens = rd32(hw, I40E_GLNVM_GENS);
72 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
73 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
74 	/* Convert to words (sr_size is the Shadow RAM size as a power of two, in KB) */
75 	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
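	/* For example, a GENS sr_size field of 3 gives BIT(3) = 8KB of
	 * Shadow RAM, i.e. 8 * I40E_SR_WORDS_IN_1KB words (4096 words,
	 * assuming that macro is 512 words per 1KB).
	 */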
76 
77 	/* Check if we are in the normal or blank NVM programming mode */
78 	fla = rd32(hw, I40E_GLNVM_FLA);
79 	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80 		/* Max NVM timeout */
81 		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
82 		nvm->blank_nvm_mode = FALSE;
83 	} else { /* Blank programming mode */
84 		nvm->blank_nvm_mode = TRUE;
85 		ret_code = I40E_ERR_NVM_BLANK_MODE;
86 		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
87 	}
88 
89 	return ret_code;
90 }
91 
92 /**
93  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
94  * @hw: pointer to the HW structure
95  * @access: NVM access type (read or write)
96  *
97  * This function will request NVM ownership, for reading or writing,
98  * via the proper Admin Command.
99  **/
100 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
101 				       enum i40e_aq_resource_access_type access)
102 {
103 	enum i40e_status_code ret_code = I40E_SUCCESS;
104 	u64 gtime, timeout;
105 	u64 time_left = 0;
106 
107 	DEBUGFUNC("i40e_acquire_nvm");
108 
109 	if (hw->nvm.blank_nvm_mode)
110 		goto i40e_acquire_nvm_exit;
111 
112 	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
113 					    0, &time_left, NULL);
114 	/* Reading the Global Device Timer */
115 	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 
117 	/* Store the timeout */
118 	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
119 
120 	if (ret_code)
121 		i40e_debug(hw, I40E_DEBUG_NVM,
122 			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
123 			   access, time_left, ret_code, hw->aq.asq_last_status);
124 
125 	if (ret_code && time_left) {
126 		/* Poll until the current NVM owner times out */
127 		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
128 		while ((gtime < timeout) && time_left) {
129 			i40e_msec_delay(10);
130 			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
131 			ret_code = i40e_aq_request_resource(hw,
132 							I40E_NVM_RESOURCE_ID,
133 							access, 0, &time_left,
134 							NULL);
135 			if (ret_code == I40E_SUCCESS) {
136 				hw->nvm.hw_semaphore_timeout =
137 					    I40E_MS_TO_GTIME(time_left) + gtime;
138 				break;
139 			}
140 		}
141 		if (ret_code != I40E_SUCCESS) {
142 			hw->nvm.hw_semaphore_timeout = 0;
143 			i40e_debug(hw, I40E_DEBUG_NVM,
144 				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
145 				   time_left, ret_code, hw->aq.asq_last_status);
146 		}
147 	}
148 
149 i40e_acquire_nvm_exit:
150 	return ret_code;
151 }
152 
153 /**
154  * i40e_release_nvm - Generic request for releasing the NVM ownership
155  * @hw: pointer to the HW structure
156  *
157  * This function will release NVM resource via the proper Admin Command.
158  **/
159 void i40e_release_nvm(struct i40e_hw *hw)
160 {
161 	enum i40e_status_code ret_code = I40E_SUCCESS;
162 	u32 total_delay = 0;
163 
164 	DEBUGFUNC("i40e_release_nvm");
165 
166 	if (hw->nvm.blank_nvm_mode)
167 		return;
168 
169 	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170 
171 	/* There are some rare cases where trying to release the resource
172 	 * results in an admin queue timeout, so handle them correctly.
173 	 */
174 	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
175 	       (total_delay < hw->aq.asq_cmd_timeout)) {
176 			i40e_msec_delay(1);
177 			ret_code = i40e_aq_release_resource(hw,
178 						I40E_NVM_RESOURCE_ID, 0, NULL);
179 			total_delay++;
180 	}
181 }
182 
183 /**
184  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
185  * @hw: pointer to the HW structure
186  *
187  * Polls the SRCTL Shadow RAM register done bit.
188  **/
189 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190 {
191 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
192 	u32 srctl, wait_cnt;
193 
194 	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195 
196 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
197 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
198 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
199 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
200 			ret_code = I40E_SUCCESS;
201 			break;
202 		}
203 		i40e_usec_delay(5);
204 	}
205 	if (ret_code == I40E_ERR_TIMEOUT)
206 		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
207 	return ret_code;
208 }
209 
210 /**
211  * i40e_read_nvm_word - Reads Shadow RAM
212  * @hw: pointer to the HW structure
213  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
214  * @data: word read from the Shadow RAM
215  *
216  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
217  **/
218 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
219 					 u16 *data)
220 {
221 	enum i40e_status_code ret_code = I40E_SUCCESS;
222 
223 	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
224 	return ret_code;
225 }
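
/* Usage sketch (illustrative only, not part of the driver): how a caller
 * might read a single Shadow RAM word with the acquire/release helpers
 * above. The offset I40E_SR_NVM_CONTROL_WORD and the error handling are
 * assumptions for the example.
 *
 *	u16 ctrl_word;
 *	enum i40e_status_code status;
 *
 *	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *	if (status == I40E_SUCCESS) {
 *		status = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD,
 *					    &ctrl_word);
 *		i40e_release_nvm(hw);
 *	}
 */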
226 
227 /**
228  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
229  * @hw: pointer to the HW structure
230  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
231  * @data: word read from the Shadow RAM
232  *
233  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
234  **/
235 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
236 					       u16 *data)
237 {
238 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
239 	u32 sr_reg;
240 
241 	DEBUGFUNC("i40e_read_nvm_word_srctl");
242 
243 	if (offset >= hw->nvm.sr_size) {
244 		i40e_debug(hw, I40E_DEBUG_NVM,
245 			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
246 			   offset, hw->nvm.sr_size);
247 		ret_code = I40E_ERR_PARAM;
248 		goto read_nvm_exit;
249 	}
250 
251 	/* Poll the done bit first */
252 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
253 	if (ret_code == I40E_SUCCESS) {
254 		/* Write the address and start reading */
255 		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
256 			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
257 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
258 
259 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
260 		ret_code = i40e_poll_sr_srctl_done_bit(hw);
261 		if (ret_code == I40E_SUCCESS) {
262 			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
263 			*data = (u16)((sr_reg &
264 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
265 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
266 		}
267 	}
268 	if (ret_code != I40E_SUCCESS)
269 		i40e_debug(hw, I40E_DEBUG_NVM,
270 			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
271 			   offset);
272 
273 read_nvm_exit:
274 	return ret_code;
275 }
276 
277 /**
278  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
279  * @hw: pointer to the HW structure
280  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
281  * @data: word read from the Shadow RAM
282  *
283  * Reads one 16 bit word from the Shadow RAM via the AdminQ (i40e_read_nvm_aq()).
284  **/
285 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
286 					    u16 *data)
287 {
288 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
289 
290 	DEBUGFUNC("i40e_read_nvm_word_aq");
291 
292 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
293 	*data = LE16_TO_CPU(*(__le16 *)data);
294 
295 	return ret_code;
296 }
297 
298 /**
299  * i40e_read_nvm_buffer - Reads Shadow RAM buffer
300  * @hw: pointer to the HW structure
301  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
302  * @words: (in) number of words to read; (out) number of words actually read
303  * @data: words read from the Shadow RAM
304  *
305  * Reads 16 bit words (data buffer) from the SR using the
306  * i40e_read_nvm_buffer_srctl() method. NVM ownership is not acquired here;
307  * the caller is responsible for any required acquire/release.
308  **/
309 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
310 					   u16 *words, u16 *data)
311 {
312 	enum i40e_status_code ret_code = I40E_SUCCESS;
313 
314 	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
315 	return ret_code;
316 }
317 
318 /**
319  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
320  * @hw: pointer to the HW structure
321  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
322  * @words: (in) number of words to read; (out) number of words actually read
323  * @data: words read from the Shadow RAM
324  *
325  * Reads 16 bit words (data buffer) from the SR using the
326  * i40e_read_nvm_word_srctl() method. NVM ownership is not acquired here;
327  * the caller is responsible for any required acquire/release.
328  **/
329 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
330 						 u16 *words, u16 *data)
331 {
332 	enum i40e_status_code ret_code = I40E_SUCCESS;
333 	u16 index, word;
334 
335 	DEBUGFUNC("i40e_read_nvm_buffer_srctl");
336 
337 	/* Loop thru the selected region */
338 	for (word = 0; word < *words; word++) {
339 		index = offset + word;
340 		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
341 		if (ret_code != I40E_SUCCESS)
342 			break;
343 	}
344 
345 	/* Update the number of words read from the Shadow RAM */
346 	*words = word;
347 
348 	return ret_code;
349 }
350 
351 /**
352  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
353  * @hw: pointer to the HW structure
354  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
355  * @words: (in) number of words to read; (out) number of words actually read
356  * @data: words read from the Shadow RAM
357  *
358  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
359  * method. NVM ownership is not acquired here; the caller is responsible
360  * for any required acquire/release.
361  **/
362 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
363 					      u16 *words, u16 *data)
364 {
365 	enum i40e_status_code ret_code;
366 	u16 read_size = *words;
367 	bool last_cmd = FALSE;
368 	u16 words_read = 0;
369 	u16 i = 0;
370 
371 	DEBUGFUNC("i40e_read_nvm_buffer_aq");
372 
373 	do {
374 		/* Calculate the number of words to read in this step.
375 		 * The FVL AQ does not allow reading more than one page at a
376 		 * time or crossing page boundaries.
377 		 */
378 		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
379 			read_size = min(*words,
380 					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
381 				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
382 		else
383 			read_size = min((*words - words_read),
384 					I40E_SR_SECTOR_SIZE_IN_WORDS);
385 
386 		/* Check if this is the last command; if so, set the proper flag */
387 		if ((words_read + read_size) >= *words)
388 			last_cmd = TRUE;
389 
390 		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
391 					    data + words_read, last_cmd);
392 		if (ret_code != I40E_SUCCESS)
393 			goto read_nvm_buffer_aq_exit;
394 
395 		/* Increment counter for words already read and move offset to
396 		 * new read location
397 		 */
398 		words_read += read_size;
399 		offset += read_size;
400 	} while (words_read < *words);
401 
402 	for (i = 0; i < *words; i++)
403 		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
404 
405 read_nvm_buffer_aq_exit:
406 	*words = words_read;
407 	return ret_code;
408 }
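
/* Worked example of the split logic above (a sketch; assumes
 * I40E_SR_SECTOR_SIZE_IN_WORDS is 0x800, i.e. one 4KB sector): reading
 * *words = 0x40 starting at offset = 0x7F0 first reads 0x10 words up to the
 * sector boundary, then a second AQ read fetches the remaining 0x30 words
 * from offset 0x800 with last_cmd set to TRUE.
 */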
409 
410 /**
411  * i40e_read_nvm_aq - Read Shadow RAM.
412  * @hw: pointer to the HW structure.
413  * @module_pointer: module pointer location in words from the NVM beginning
414  * @offset: offset in words from module start
415  * @words: number of words to read
416  * @data: buffer for the words read from the Shadow RAM
417  * @last_command: tells the AdminQ that this is the last command
418  *
419  * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
420  **/
421 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
422 				       u32 offset, u16 words, void *data,
423 				       bool last_command)
424 {
425 	enum i40e_status_code ret_code = I40E_ERR_NVM;
426 	struct i40e_asq_cmd_details cmd_details;
427 
428 	DEBUGFUNC("i40e_read_nvm_aq");
429 
430 	memset(&cmd_details, 0, sizeof(cmd_details));
431 	cmd_details.wb_desc = &hw->nvm_wb_desc;
432 
433 	/* Here we are checking the SR limit only for the flat memory model.
434 	 * We cannot do it for the module-based model, as we did not acquire
435 	 * the NVM resource yet (we cannot get the module pointer value).
436 	 * Firmware will check the module-based model.
437 	 */
438 	if ((offset + words) > hw->nvm.sr_size)
439 		i40e_debug(hw, I40E_DEBUG_NVM,
440 			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
441 			   (offset + words), hw->nvm.sr_size);
442 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
443 		/* We can read only up to 4KB (one sector) in one AQ read */
444 		i40e_debug(hw, I40E_DEBUG_NVM,
445 			   "NVM read error: tried to read %d words, limit is %d.\n",
446 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
447 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
448 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
449 		/* A single read cannot spread over two sectors */
450 		i40e_debug(hw, I40E_DEBUG_NVM,
451 			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
452 			   offset, words);
453 	else
454 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
455 					    2 * offset,  /*bytes*/
456 					    2 * words,   /*bytes*/
457 					    data, last_command, &cmd_details);
458 
459 	return ret_code;
460 }
461 
462 /**
463  * i40e_write_nvm_aq - Writes Shadow RAM.
464  * @hw: pointer to the HW structure.
465  * @module_pointer: module pointer location in words from the NVM beginning
466  * @offset: offset in words from module start
467  * @words: number of words to write
468  * @data: buffer with words to write to the Shadow RAM
469  * @last_command: tells the AdminQ that this is the last command
470  *
471  * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
472  **/
473 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
474 					u32 offset, u16 words, void *data,
475 					bool last_command)
476 {
477 	enum i40e_status_code ret_code = I40E_ERR_NVM;
478 	struct i40e_asq_cmd_details cmd_details;
479 
480 	DEBUGFUNC("i40e_write_nvm_aq");
481 
482 	memset(&cmd_details, 0, sizeof(cmd_details));
483 	cmd_details.wb_desc = &hw->nvm_wb_desc;
484 
485 	/* Here we are checking the SR limit only for the flat memory model.
486 	 * We cannot do it for the module-based model, as we did not acquire
487 	 * the NVM resource yet (we cannot get the module pointer value).
488 	 * Firmware will check the module-based model.
489 	 */
490 	if ((offset + words) > hw->nvm.sr_size)
491 		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
492 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
493 		/* We can write only up to 4KB (one sector), in one AQ write */
494 		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
495 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
496 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
497 		/* A single write cannot spread over two sectors */
498 		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
499 	else
500 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
501 					      2 * offset,  /*bytes*/
502 					      2 * words,   /*bytes*/
503 					      data, last_command, &cmd_details);
504 
505 	return ret_code;
506 }
507 
508 /**
509  * i40e_write_nvm_word - Writes Shadow RAM word
510  * @hw: pointer to the HW structure
511  * @offset: offset of the Shadow RAM word to write
512  * @data: word to write to the Shadow RAM
513  *
514  * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
515  * NVM ownership has to be acquired and released (on ARQ completion event
516  * reception) by the caller. To commit the SR to the NVM, the update
517  * checksum function should be called.
518  **/
519 enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
520 					  void *data)
521 {
522 	DEBUGFUNC("i40e_write_nvm_word");
523 
524 	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
525 
526 	/* Value 0x00 below means that we treat SR as a flat mem */
527 	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
528 }
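
/* Usage sketch (illustrative only, not part of the driver): a caller that
 * updates a single Shadow RAM word and then schedules the checksum commit.
 * The offset and value are hypothetical; per the comments above, the caller
 * releases NVM ownership on the ARQ completion event rather than right away.
 *
 *	u32 offset = 0x100;
 *	u16 new_word = 0x1234;
 *
 *	if (i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE) == I40E_SUCCESS) {
 *		if (i40e_write_nvm_word(hw, offset, &new_word) == I40E_SUCCESS)
 *			(void)i40e_update_nvm_checksum(hw);
 *	}
 */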
529 
530 /**
531  * i40e_write_nvm_buffer - Writes Shadow RAM buffer
532  * @hw: pointer to the HW structure
533  * @module_pointer: module pointer location in words from the NVM beginning
534  * @offset: offset of the Shadow RAM buffer to write
535  * @words: number of words to write
536  * @data: words to write to the Shadow RAM
537  *
538  * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
539  * NVM ownership must be acquired before calling this function and released
540  * by the caller on ARQ completion event reception. To commit the SR to the
541  * NVM, the update checksum function should be called.
542  **/
543 enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
544 					    u8 module_pointer, u32 offset,
545 					    u16 words, void *data)
546 {
547 	__le16 *le_word_ptr = (__le16 *)data;
548 	u16 *word_ptr = (u16 *)data;
549 	u32 i = 0;
550 
551 	DEBUGFUNC("i40e_write_nvm_buffer");
552 
553 	for (i = 0; i < words; i++)
554 		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
555 
556 	/* Here we will only write one buffer as the size of the modules
557 	 * mirrored in the Shadow RAM is always less than 4K.
558 	 */
559 	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
560 				 data, FALSE);
561 }
562 
563 /**
564  * i40e_calc_nvm_checksum - Calculates and returns the checksum
565  * @hw: pointer to hardware structure
566  * @checksum: pointer to the checksum
567  *
568  * This function calculates the SW checksum that covers the whole 64kB shadow
569  * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
570  * of the VPD are customer specific and unknown. Therefore, this function
571  * skips the maximum possible size of the VPD (1kB).
572  **/
573 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
574 {
575 	enum i40e_status_code ret_code = I40E_SUCCESS;
576 	struct i40e_virt_mem vmem;
577 	u16 pcie_alt_module = 0;
578 	u16 checksum_local = 0;
579 	u16 vpd_module = 0;
580 	u16 *data;
581 	u16 i = 0;
582 
583 	DEBUGFUNC("i40e_calc_nvm_checksum");
584 
585 	ret_code = i40e_allocate_virt_mem(hw, &vmem,
586 				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
587 	if (ret_code)
588 		goto i40e_calc_nvm_checksum_exit;
589 	data = (u16 *)vmem.va;
590 
591 	/* read pointer to VPD area */
592 	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
593 	if (ret_code != I40E_SUCCESS) {
594 		ret_code = I40E_ERR_NVM_CHECKSUM;
595 		goto i40e_calc_nvm_checksum_exit;
596 	}
597 
598 	/* read pointer to PCIe Alt Auto-load module */
599 	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
600 				      &pcie_alt_module);
601 	if (ret_code != I40E_SUCCESS) {
602 		ret_code = I40E_ERR_NVM_CHECKSUM;
603 		goto i40e_calc_nvm_checksum_exit;
604 	}
605 
606 	/* Calculate SW checksum that covers the whole 64kB shadow RAM
607 	 * except the VPD and PCIe ALT Auto-load modules
608 	 */
609 	for (i = 0; i < hw->nvm.sr_size; i++) {
610 		/* Read SR page */
611 		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
612 			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
613 
614 			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
615 			if (ret_code != I40E_SUCCESS) {
616 				ret_code = I40E_ERR_NVM_CHECKSUM;
617 				goto i40e_calc_nvm_checksum_exit;
618 			}
619 		}
620 
621 		/* Skip Checksum word */
622 		if (i == I40E_SR_SW_CHECKSUM_WORD)
623 			continue;
624 		/* Skip VPD module (convert byte size to word count) */
625 		if ((i >= (u32)vpd_module) &&
626 		    (i < ((u32)vpd_module +
627 		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
628 			continue;
629 		}
630 		/* Skip PCIe ALT module (convert byte size to word count) */
631 		if ((i >= (u32)pcie_alt_module) &&
632 		    (i < ((u32)pcie_alt_module +
633 		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
634 			continue;
635 		}
636 
637 		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
638 	}
639 
640 	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
641 
642 i40e_calc_nvm_checksum_exit:
643 	i40e_free_virt_mem(hw, &vmem);
644 	return ret_code;
645 }
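
/* Note on the arithmetic above: the stored checksum is
 * I40E_SR_SW_CHECKSUM_BASE minus the 16-bit sum of the covered words
 * (modulo 2^16). i40e_validate_nvm_checksum() below simply recomputes this
 * value and compares it with the word stored at I40E_SR_SW_CHECKSUM_WORD.
 */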
646 
647 /**
648  * i40e_update_nvm_checksum - Updates the NVM checksum
649  * @hw: pointer to hardware structure
650  *
651  * NVM ownership must be acquired before calling this function and released
652  * by the caller on ARQ completion event reception.
653  * This function will commit the SR to the NVM.
654  **/
655 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
656 {
657 	enum i40e_status_code ret_code = I40E_SUCCESS;
658 	u16 checksum;
659 	__le16 le_sum;
660 
661 	DEBUGFUNC("i40e_update_nvm_checksum");
662 
663 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
664 	le_sum = CPU_TO_LE16(checksum);
665 	if (ret_code == I40E_SUCCESS)
666 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
667 					     1, &le_sum, TRUE);
668 
669 	return ret_code;
670 }
671 
672 /**
673  * i40e_validate_nvm_checksum - Validate EEPROM checksum
674  * @hw: pointer to hardware structure
675  * @checksum: calculated checksum
676  *
677  * Performs checksum calculation and validates the NVM SW checksum. If the
678  * caller does not need the checksum, the pointer can be NULL.
679  **/
680 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
681 						 u16 *checksum)
682 {
683 	enum i40e_status_code ret_code = I40E_SUCCESS;
684 	u16 checksum_sr = 0;
685 	u16 checksum_local = 0;
686 
687 	DEBUGFUNC("i40e_validate_nvm_checksum");
688 
689 	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
690 	if (ret_code != I40E_SUCCESS)
691 		goto i40e_validate_nvm_checksum_exit;
692 
693 	/* Read the stored checksum word; i40e_read_nvm_word() uses the SRCTL
694 	 * path here and does not take the synchronization semaphores again.
695 	 */
696 	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
697 
698 	/* Verify read checksum from EEPROM is the same as
699 	 * calculated checksum
700 	 */
701 	if (checksum_local != checksum_sr)
702 		ret_code = I40E_ERR_NVM_CHECKSUM;
703 
704 	/* If the user cares, return the calculated checksum */
705 	if (checksum)
706 		*checksum = checksum_local;
707 
708 i40e_validate_nvm_checksum_exit:
709 	return ret_code;
710 }
711 
712 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
713 						    struct i40e_nvm_access *cmd,
714 						    u8 *bytes, int *perrno);
715 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
716 						    struct i40e_nvm_access *cmd,
717 						    u8 *bytes, int *perrno);
718 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
719 						    struct i40e_nvm_access *cmd,
720 						    u8 *bytes, int *perrno);
721 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
722 						    struct i40e_nvm_access *cmd,
723 						    int *perrno);
724 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
725 						   struct i40e_nvm_access *cmd,
726 						   int *perrno);
727 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
728 						   struct i40e_nvm_access *cmd,
729 						   u8 *bytes, int *perrno);
730 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
731 						  struct i40e_nvm_access *cmd,
732 						  u8 *bytes, int *perrno);
733 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
734 						 struct i40e_nvm_access *cmd,
735 						 u8 *bytes, int *perrno);
736 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
737 						    struct i40e_nvm_access *cmd,
738 						    u8 *bytes, int *perrno);
739 static INLINE u8 i40e_nvmupd_get_module(u32 val)
740 {
741 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
742 }
743 static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
744 {
745 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
746 }
747 
748 static const char *i40e_nvm_update_state_str[] = {
749 	"I40E_NVMUPD_INVALID",
750 	"I40E_NVMUPD_READ_CON",
751 	"I40E_NVMUPD_READ_SNT",
752 	"I40E_NVMUPD_READ_LCB",
753 	"I40E_NVMUPD_READ_SA",
754 	"I40E_NVMUPD_WRITE_ERA",
755 	"I40E_NVMUPD_WRITE_CON",
756 	"I40E_NVMUPD_WRITE_SNT",
757 	"I40E_NVMUPD_WRITE_LCB",
758 	"I40E_NVMUPD_WRITE_SA",
759 	"I40E_NVMUPD_CSUM_CON",
760 	"I40E_NVMUPD_CSUM_SA",
761 	"I40E_NVMUPD_CSUM_LCB",
762 	"I40E_NVMUPD_STATUS",
763 	"I40E_NVMUPD_EXEC_AQ",
764 	"I40E_NVMUPD_GET_AQ_RESULT",
765 };
766 
767 /**
768  * i40e_nvmupd_command - Process an NVM update command
769  * @hw: pointer to hardware structure
770  * @cmd: pointer to nvm update command
771  * @bytes: pointer to the data buffer
772  * @perrno: pointer to return error code
773  *
774  * Dispatches command depending on what update state is current
775  **/
776 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
777 					  struct i40e_nvm_access *cmd,
778 					  u8 *bytes, int *perrno)
779 {
780 	enum i40e_status_code status;
781 	enum i40e_nvmupd_cmd upd_cmd;
782 
783 	DEBUGFUNC("i40e_nvmupd_command");
784 
785 	/* assume success */
786 	*perrno = 0;
787 
788 	/* early check for status command and debug msgs */
789 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
790 
791 	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d\n",
792 		   i40e_nvm_update_state_str[upd_cmd],
793 		   hw->nvmupd_state,
794 		   hw->aq.nvm_release_on_done);
795 
796 	if (upd_cmd == I40E_NVMUPD_INVALID) {
797 		*perrno = -EFAULT;
798 		i40e_debug(hw, I40E_DEBUG_NVM,
799 			   "i40e_nvmupd_validate_command returns %d errno %d\n",
800 			   upd_cmd, *perrno);
801 	}
802 
803 	/* a status request returns immediately rather than
804 	 * going into the state machine
805 	 */
806 	if (upd_cmd == I40E_NVMUPD_STATUS) {
807 		bytes[0] = hw->nvmupd_state;
808 		return I40E_SUCCESS;
809 	}
810 
811 	switch (hw->nvmupd_state) {
812 	case I40E_NVMUPD_STATE_INIT:
813 		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
814 		break;
815 
816 	case I40E_NVMUPD_STATE_READING:
817 		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
818 		break;
819 
820 	case I40E_NVMUPD_STATE_WRITING:
821 		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
822 		break;
823 
824 	case I40E_NVMUPD_STATE_INIT_WAIT:
825 	case I40E_NVMUPD_STATE_WRITE_WAIT:
826 		status = I40E_ERR_NOT_READY;
827 		*perrno = -EBUSY;
828 		break;
829 
830 	default:
831 		/* invalid state, should never happen */
832 		i40e_debug(hw, I40E_DEBUG_NVM,
833 			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
834 		status = I40E_NOT_SUPPORTED;
835 		*perrno = -ESRCH;
836 		break;
837 	}
838 	return status;
839 }
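
/* Usage sketch (illustrative only, not part of the driver): how an NVM
 * update tool might query the state machine through this entry point. Per
 * i40e_nvmupd_validate_command() below, a STATUS request is an I40E_NVM_READ
 * with the EXEC transaction and module 0xf, and bytes[0] is filled with
 * hw->nvmupd_state; the remaining field values are assumptions for the
 * example.
 *
 *	struct i40e_nvm_access cmd = { 0 };
 *	u8 state;
 *	int perrno = 0;
 *
 *	cmd.command = I40E_NVM_READ;
 *	cmd.config = (I40E_NVM_EXEC << I40E_NVM_TRANS_SHIFT) | 0xf;
 *	cmd.data_size = 1;
 *	(void)i40e_nvmupd_command(hw, &cmd, &state, &perrno);
 */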
840 
841 /**
842  * i40e_nvmupd_state_init - Handle NVM update state Init
843  * @hw: pointer to hardware structure
844  * @cmd: pointer to nvm update command buffer
845  * @bytes: pointer to the data buffer
846  * @perrno: pointer to return error code
847  *
848  * Process legitimate commands of the Init state and conditionally set next
849  * state. Reject all other commands.
850  **/
851 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
852 						    struct i40e_nvm_access *cmd,
853 						    u8 *bytes, int *perrno)
854 {
855 	enum i40e_status_code status = I40E_SUCCESS;
856 	enum i40e_nvmupd_cmd upd_cmd;
857 
858 	DEBUGFUNC("i40e_nvmupd_state_init");
859 
860 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
861 
862 	switch (upd_cmd) {
863 	case I40E_NVMUPD_READ_SA:
864 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
865 		if (status) {
866 			*perrno = i40e_aq_rc_to_posix(status,
867 						     hw->aq.asq_last_status);
868 		} else {
869 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
870 			i40e_release_nvm(hw);
871 		}
872 		break;
873 
874 	case I40E_NVMUPD_READ_SNT:
875 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
876 		if (status) {
877 			*perrno = i40e_aq_rc_to_posix(status,
878 						     hw->aq.asq_last_status);
879 		} else {
880 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
881 			if (status)
882 				i40e_release_nvm(hw);
883 			else
884 				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
885 		}
886 		break;
887 
888 	case I40E_NVMUPD_WRITE_ERA:
889 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
890 		if (status) {
891 			*perrno = i40e_aq_rc_to_posix(status,
892 						     hw->aq.asq_last_status);
893 		} else {
894 			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
895 			if (status) {
896 				i40e_release_nvm(hw);
897 			} else {
898 				hw->aq.nvm_release_on_done = TRUE;
899 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
900 			}
901 		}
902 		break;
903 
904 	case I40E_NVMUPD_WRITE_SA:
905 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
906 		if (status) {
907 			*perrno = i40e_aq_rc_to_posix(status,
908 						     hw->aq.asq_last_status);
909 		} else {
910 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
911 			if (status) {
912 				i40e_release_nvm(hw);
913 			} else {
914 				hw->aq.nvm_release_on_done = TRUE;
915 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
916 			}
917 		}
918 		break;
919 
920 	case I40E_NVMUPD_WRITE_SNT:
921 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
922 		if (status) {
923 			*perrno = i40e_aq_rc_to_posix(status,
924 						     hw->aq.asq_last_status);
925 		} else {
926 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
927 			if (status)
928 				i40e_release_nvm(hw);
929 			else
930 				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
931 		}
932 		break;
933 
934 	case I40E_NVMUPD_CSUM_SA:
935 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
936 		if (status) {
937 			*perrno = i40e_aq_rc_to_posix(status,
938 						     hw->aq.asq_last_status);
939 		} else {
940 			status = i40e_update_nvm_checksum(hw);
941 			if (status) {
942 				*perrno = hw->aq.asq_last_status ?
943 				   i40e_aq_rc_to_posix(status,
944 						       hw->aq.asq_last_status) :
945 				   -EIO;
946 				i40e_release_nvm(hw);
947 			} else {
948 				hw->aq.nvm_release_on_done = TRUE;
949 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
950 			}
951 		}
952 		break;
953 
954 	case I40E_NVMUPD_EXEC_AQ:
955 		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
956 		break;
957 
958 	case I40E_NVMUPD_GET_AQ_RESULT:
959 		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
960 		break;
961 
962 	default:
963 		i40e_debug(hw, I40E_DEBUG_NVM,
964 			   "NVMUPD: bad cmd %s in init state\n",
965 			   i40e_nvm_update_state_str[upd_cmd]);
966 		status = I40E_ERR_NVM;
967 		*perrno = -ESRCH;
968 		break;
969 	}
970 	return status;
971 }
972 
973 /**
974  * i40e_nvmupd_state_reading - Handle NVM update state Reading
975  * @hw: pointer to hardware structure
976  * @cmd: pointer to nvm update command buffer
977  * @bytes: pointer to the data buffer
978  * @perrno: pointer to return error code
979  *
980  * NVM ownership is already held.  Process legitimate commands and set any
981  * change in state; reject all other commands.
982  **/
983 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
984 						    struct i40e_nvm_access *cmd,
985 						    u8 *bytes, int *perrno)
986 {
987 	enum i40e_status_code status = I40E_SUCCESS;
988 	enum i40e_nvmupd_cmd upd_cmd;
989 
990 	DEBUGFUNC("i40e_nvmupd_state_reading");
991 
992 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
993 
994 	switch (upd_cmd) {
995 	case I40E_NVMUPD_READ_SA:
996 	case I40E_NVMUPD_READ_CON:
997 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
998 		break;
999 
1000 	case I40E_NVMUPD_READ_LCB:
1001 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1002 		i40e_release_nvm(hw);
1003 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1004 		break;
1005 
1006 	default:
1007 		i40e_debug(hw, I40E_DEBUG_NVM,
1008 			   "NVMUPD: bad cmd %s in reading state.\n",
1009 			   i40e_nvm_update_state_str[upd_cmd]);
1010 		status = I40E_NOT_SUPPORTED;
1011 		*perrno = -ESRCH;
1012 		break;
1013 	}
1014 	return status;
1015 }
1016 
1017 /**
1018  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1019  * @hw: pointer to hardware structure
1020  * @cmd: pointer to nvm update command buffer
1021  * @bytes: pointer to the data buffer
1022  * @perrno: pointer to return error code
1023  *
1024  * NVM ownership is already held.  Process legitimate commands and set any
1025  * change in state; reject all other commands
1026  **/
1027 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1028 						    struct i40e_nvm_access *cmd,
1029 						    u8 *bytes, int *perrno)
1030 {
1031 	enum i40e_status_code status = I40E_SUCCESS;
1032 	enum i40e_nvmupd_cmd upd_cmd;
1033 	bool retry_attempt = FALSE;
1034 
1035 	DEBUGFUNC("i40e_nvmupd_state_writing");
1036 
1037 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1038 
1039 retry:
1040 	switch (upd_cmd) {
1041 	case I40E_NVMUPD_WRITE_CON:
1042 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1043 		if (!status)
1044 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1045 		break;
1046 
1047 	case I40E_NVMUPD_WRITE_LCB:
1048 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1049 		if (status) {
1050 			*perrno = hw->aq.asq_last_status ?
1051 				   i40e_aq_rc_to_posix(status,
1052 						       hw->aq.asq_last_status) :
1053 				   -EIO;
1054 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1055 		} else {
1056 			hw->aq.nvm_release_on_done = TRUE;
1057 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1058 		}
1059 		break;
1060 
1061 	case I40E_NVMUPD_CSUM_CON:
1062 		status = i40e_update_nvm_checksum(hw);
1063 		if (status) {
1064 			*perrno = hw->aq.asq_last_status ?
1065 				   i40e_aq_rc_to_posix(status,
1066 						       hw->aq.asq_last_status) :
1067 				   -EIO;
1068 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1069 		} else {
1070 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1071 		}
1072 		break;
1073 
1074 	case I40E_NVMUPD_CSUM_LCB:
1075 		status = i40e_update_nvm_checksum(hw);
1076 		if (status) {
1077 			*perrno = hw->aq.asq_last_status ?
1078 				   i40e_aq_rc_to_posix(status,
1079 						       hw->aq.asq_last_status) :
1080 				   -EIO;
1081 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1082 		} else {
1083 			hw->aq.nvm_release_on_done = TRUE;
1084 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1085 		}
1086 		break;
1087 
1088 	default:
1089 		i40e_debug(hw, I40E_DEBUG_NVM,
1090 			   "NVMUPD: bad cmd %s in writing state.\n",
1091 			   i40e_nvm_update_state_str[upd_cmd]);
1092 		status = I40E_NOT_SUPPORTED;
1093 		*perrno = -ESRCH;
1094 		break;
1095 	}
1096 
1097 	/* In some circumstances, a multi-write transaction takes longer
1098 	 * than the default 3 minute timeout on the write semaphore.  If
1099 	 * the write failed with an EBUSY status, this is likely the problem,
1100 	 * so here we try to reacquire the semaphore then retry the write.
1101 	 * We only do one retry, then give up.
1102 	 */
1103 	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1104 	    !retry_attempt) {
1105 		enum i40e_status_code old_status = status;
1106 		u32 old_asq_status = hw->aq.asq_last_status;
1107 		u32 gtime;
1108 
1109 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1110 		if (gtime >= hw->nvm.hw_semaphore_timeout) {
1111 			i40e_debug(hw, I40E_DEBUG_ALL,
1112 				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1113 				   gtime, hw->nvm.hw_semaphore_timeout);
1114 			i40e_release_nvm(hw);
1115 			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1116 			if (status) {
1117 				i40e_debug(hw, I40E_DEBUG_ALL,
1118 					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1119 					   hw->aq.asq_last_status);
1120 				status = old_status;
1121 				hw->aq.asq_last_status = old_asq_status;
1122 			} else {
1123 				retry_attempt = TRUE;
1124 				goto retry;
1125 			}
1126 		}
1127 	}
1128 
1129 	return status;
1130 }
1131 
1132 /**
1133  * i40e_nvmupd_validate_command - Validate given command
1134  * @hw: pointer to hardware structure
1135  * @cmd: pointer to nvm update command buffer
1136  * @perrno: pointer to return error code
1137  *
1138  * Return one of the valid command types or I40E_NVMUPD_INVALID
1139  **/
1140 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1141 						    struct i40e_nvm_access *cmd,
1142 						    int *perrno)
1143 {
1144 	enum i40e_nvmupd_cmd upd_cmd;
1145 	u8 module, transaction;
1146 
1147 	DEBUGFUNC("i40e_nvmupd_validate_command");
1148 
1149 	/* anything that doesn't match a recognized case is an error */
1150 	upd_cmd = I40E_NVMUPD_INVALID;
1151 
1152 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1153 	module = i40e_nvmupd_get_module(cmd->config);
1154 
1155 	/* limits on data size */
1156 	if ((cmd->data_size < 1) ||
1157 	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1158 		i40e_debug(hw, I40E_DEBUG_NVM,
1159 			   "i40e_nvmupd_validate_command data_size %d\n",
1160 			   cmd->data_size);
1161 		*perrno = -EFAULT;
1162 		return I40E_NVMUPD_INVALID;
1163 	}
1164 
1165 	switch (cmd->command) {
1166 	case I40E_NVM_READ:
1167 		switch (transaction) {
1168 		case I40E_NVM_CON:
1169 			upd_cmd = I40E_NVMUPD_READ_CON;
1170 			break;
1171 		case I40E_NVM_SNT:
1172 			upd_cmd = I40E_NVMUPD_READ_SNT;
1173 			break;
1174 		case I40E_NVM_LCB:
1175 			upd_cmd = I40E_NVMUPD_READ_LCB;
1176 			break;
1177 		case I40E_NVM_SA:
1178 			upd_cmd = I40E_NVMUPD_READ_SA;
1179 			break;
1180 		case I40E_NVM_EXEC:
1181 			if (module == 0xf)
1182 				upd_cmd = I40E_NVMUPD_STATUS;
1183 			else if (module == 0)
1184 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1185 			break;
1186 		}
1187 		break;
1188 
1189 	case I40E_NVM_WRITE:
1190 		switch (transaction) {
1191 		case I40E_NVM_CON:
1192 			upd_cmd = I40E_NVMUPD_WRITE_CON;
1193 			break;
1194 		case I40E_NVM_SNT:
1195 			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1196 			break;
1197 		case I40E_NVM_LCB:
1198 			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1199 			break;
1200 		case I40E_NVM_SA:
1201 			upd_cmd = I40E_NVMUPD_WRITE_SA;
1202 			break;
1203 		case I40E_NVM_ERA:
1204 			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1205 			break;
1206 		case I40E_NVM_CSUM:
1207 			upd_cmd = I40E_NVMUPD_CSUM_CON;
1208 			break;
1209 		case (I40E_NVM_CSUM|I40E_NVM_SA):
1210 			upd_cmd = I40E_NVMUPD_CSUM_SA;
1211 			break;
1212 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1213 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1214 			break;
1215 		case I40E_NVM_EXEC:
1216 			if (module == 0)
1217 				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1218 			break;
1219 		}
1220 		break;
1221 	}
1222 
1223 	return upd_cmd;
1224 }
1225 
1226 /**
1227  * i40e_nvmupd_exec_aq - Run an AQ command
1228  * @hw: pointer to hardware structure
1229  * @cmd: pointer to nvm update command buffer
1230  * @bytes: pointer to the data buffer
1231  * @perrno: pointer to return error code
1232  *
1233  * cmd structure contains identifiers and data buffer
1234  **/
1235 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1236 						 struct i40e_nvm_access *cmd,
1237 						 u8 *bytes, int *perrno)
1238 {
1239 	struct i40e_asq_cmd_details cmd_details;
1240 	enum i40e_status_code status;
1241 	struct i40e_aq_desc *aq_desc;
1242 	u32 buff_size = 0;
1243 	u8 *buff = NULL;
1244 	u32 aq_desc_len;
1245 	u32 aq_data_len;
1246 
1247 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1248 	memset(&cmd_details, 0, sizeof(cmd_details));
1249 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1250 
1251 	aq_desc_len = sizeof(struct i40e_aq_desc);
1252 	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1253 
1254 	/* get the aq descriptor */
1255 	if (cmd->data_size < aq_desc_len) {
1256 		i40e_debug(hw, I40E_DEBUG_NVM,
1257 			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1258 			   cmd->data_size, aq_desc_len);
1259 		*perrno = -EINVAL;
1260 		return I40E_ERR_PARAM;
1261 	}
1262 	aq_desc = (struct i40e_aq_desc *)bytes;
1263 
1264 	/* if data buffer needed, make sure it's ready */
1265 	aq_data_len = cmd->data_size - aq_desc_len;
1266 	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1267 	if (buff_size) {
1268 		if (!hw->nvm_buff.va) {
1269 			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1270 							hw->aq.asq_buf_size);
1271 			if (status)
1272 				i40e_debug(hw, I40E_DEBUG_NVM,
1273 					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1274 					   status);
1275 		}
1276 
1277 		if (hw->nvm_buff.va) {
1278 			buff = hw->nvm_buff.va;
1279 			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1280 		}
1281 	}
1282 
1283 	/* and away we go! */
1284 	status = i40e_asq_send_command(hw, aq_desc, buff,
1285 				       buff_size, &cmd_details);
1286 	if (status) {
1287 		i40e_debug(hw, I40E_DEBUG_NVM,
1288 			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1289 			   i40e_stat_str(hw, status),
1290 			   i40e_aq_str(hw, hw->aq.asq_last_status));
1291 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1292 	}
1293 
1294 	return status;
1295 }
1296 
1297 /**
1298  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1299  * @hw: pointer to hardware structure
1300  * @cmd: pointer to nvm update command buffer
1301  * @bytes: pointer to the data buffer
1302  * @perrno: pointer to return error code
1303  *
1304  * cmd structure contains identifiers and data buffer
1305  **/
1306 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1307 						    struct i40e_nvm_access *cmd,
1308 						    u8 *bytes, int *perrno)
1309 {
1310 	u32 aq_total_len;
1311 	u32 aq_desc_len;
1312 	int remainder;
1313 	u8 *buff;
1314 
1315 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1316 
1317 	aq_desc_len = sizeof(struct i40e_aq_desc);
1318 	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1319 
1320 	/* check offset range */
1321 	if (cmd->offset > aq_total_len) {
1322 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1323 			   __func__, cmd->offset, aq_total_len);
1324 		*perrno = -EINVAL;
1325 		return I40E_ERR_PARAM;
1326 	}
1327 
1328 	/* check copylength range */
1329 	if (cmd->data_size > (aq_total_len - cmd->offset)) {
1330 		int new_len = aq_total_len - cmd->offset;
1331 
1332 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1333 			   __func__, cmd->data_size, new_len);
1334 		cmd->data_size = new_len;
1335 	}
1336 
1337 	remainder = cmd->data_size;
1338 	if (cmd->offset < aq_desc_len) {
1339 		u32 len = aq_desc_len - cmd->offset;
1340 
1341 		len = min(len, cmd->data_size);
1342 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1343 			   __func__, cmd->offset, cmd->offset + len);
1344 
1345 		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1346 		memcpy(bytes, buff, len);
1347 
1348 		bytes += len;
1349 		remainder -= len;
1350 		buff = hw->nvm_buff.va;
1351 	} else {
1352 		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1353 	}
1354 
1355 	if (remainder > 0) {
1356 		int start_byte = buff - (u8 *)hw->nvm_buff.va;
1357 
1358 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1359 			   __func__, start_byte, start_byte + remainder);
1360 		memcpy(bytes, buff, remainder);
1361 	}
1362 
1363 	return I40E_SUCCESS;
1364 }
1365 
1366 /**
1367  * i40e_nvmupd_nvm_read - Read NVM
1368  * @hw: pointer to hardware structure
1369  * @cmd: pointer to nvm update command buffer
1370  * @bytes: pointer to the data buffer
1371  * @perrno: pointer to return error code
1372  *
1373  * cmd structure contains identifiers and data buffer
1374  **/
1375 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1376 						  struct i40e_nvm_access *cmd,
1377 						  u8 *bytes, int *perrno)
1378 {
1379 	struct i40e_asq_cmd_details cmd_details;
1380 	enum i40e_status_code status;
1381 	u8 module, transaction;
1382 	bool last;
1383 
1384 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1385 	module = i40e_nvmupd_get_module(cmd->config);
1386 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1387 
1388 	memset(&cmd_details, 0, sizeof(cmd_details));
1389 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1390 
1391 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1392 				  bytes, last, &cmd_details);
1393 	if (status) {
1394 		i40e_debug(hw, I40E_DEBUG_NVM,
1395 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1396 			   module, cmd->offset, cmd->data_size);
1397 		i40e_debug(hw, I40E_DEBUG_NVM,
1398 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1399 			   status, hw->aq.asq_last_status);
1400 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1401 	}
1402 
1403 	return status;
1404 }
1405 
1406 /**
1407  * i40e_nvmupd_nvm_erase - Erase an NVM module
1408  * @hw: pointer to hardware structure
1409  * @cmd: pointer to nvm update command buffer
1410  * @perrno: pointer to return error code
1411  *
1412  * module, offset, data_size and data are in cmd structure
1413  **/
1414 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1415 						   struct i40e_nvm_access *cmd,
1416 						   int *perrno)
1417 {
1418 	enum i40e_status_code status = I40E_SUCCESS;
1419 	struct i40e_asq_cmd_details cmd_details;
1420 	u8 module, transaction;
1421 	bool last;
1422 
1423 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1424 	module = i40e_nvmupd_get_module(cmd->config);
1425 	last = (transaction & I40E_NVM_LCB);
1426 
1427 	memset(&cmd_details, 0, sizeof(cmd_details));
1428 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1429 
1430 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1431 				   last, &cmd_details);
1432 	if (status) {
1433 		i40e_debug(hw, I40E_DEBUG_NVM,
1434 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1435 			   module, cmd->offset, cmd->data_size);
1436 		i40e_debug(hw, I40E_DEBUG_NVM,
1437 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1438 			   status, hw->aq.asq_last_status);
1439 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1440 	}
1441 
1442 	return status;
1443 }
1444 
1445 /**
1446  * i40e_nvmupd_nvm_write - Write NVM
1447  * @hw: pointer to hardware structure
1448  * @cmd: pointer to nvm update command buffer
1449  * @bytes: pointer to the data buffer
1450  * @perrno: pointer to return error code
1451  *
1452  * module, offset, data_size and data are in cmd structure
1453  **/
1454 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1455 						   struct i40e_nvm_access *cmd,
1456 						   u8 *bytes, int *perrno)
1457 {
1458 	enum i40e_status_code status = I40E_SUCCESS;
1459 	struct i40e_asq_cmd_details cmd_details;
1460 	u8 module, transaction;
1461 	bool last;
1462 
1463 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1464 	module = i40e_nvmupd_get_module(cmd->config);
1465 	last = (transaction & I40E_NVM_LCB);
1466 
1467 	memset(&cmd_details, 0, sizeof(cmd_details));
1468 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1469 
1470 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1471 				    (u16)cmd->data_size, bytes, last,
1472 				    &cmd_details);
1473 	if (status) {
1474 		i40e_debug(hw, I40E_DEBUG_NVM,
1475 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1476 			   module, cmd->offset, cmd->data_size);
1477 		i40e_debug(hw, I40E_DEBUG_NVM,
1478 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1479 			   status, hw->aq.asq_last_status);
1480 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1481 	}
1482 
1483 	return status;
1484 }
1485