xref: /linux/drivers/net/ethernet/intel/i40e/i40e_nvm.c (revision 4e94ddfe2aab72139acb8d5372fac9e6c3f3e383)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/delay.h>
5 #include "i40e_alloc.h"
6 #include "i40e_prototype.h"
7 
8 /**
9  * i40e_init_nvm - Initialize NVM function pointers
10  * @hw: pointer to the HW structure
11  *
12  * Setup the function pointers and the NVM info structure. Should be called
13  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
14  * Please notice that the NVM term is used here (& in all methods covered
15  * in this file) as an equivalent of the FLASH part mapped into the SR.
16  * We are accessing FLASH always thru the Shadow RAM.
17  **/
18 int i40e_init_nvm(struct i40e_hw *hw)
19 {
20 	struct i40e_nvm_info *nvm = &hw->nvm;
21 	int ret_code = 0;
22 	u32 fla, gens;
23 	u8 sr_size;
24 
25 	/* The SR size is stored regardless of the nvm programming mode
26 	 * as the blank mode may be used in the factory line.
27 	 */
28 	gens = rd32(hw, I40E_GLNVM_GENS);
29 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
30 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
31 	/* Switching to words (sr_size contains power of 2KB) */
32 	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
33 
34 	/* Check if we are in the normal or blank NVM programming mode */
35 	fla = rd32(hw, I40E_GLNVM_FLA);
36 	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
37 		/* Max NVM timeout */
38 		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
39 		nvm->blank_nvm_mode = false;
40 	} else { /* Blank programming mode */
41 		nvm->blank_nvm_mode = true;
42 		ret_code = -EIO;
43 		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
44 	}
45 
46 	return ret_code;
47 }
48 
49 /**
50  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
51  * @hw: pointer to the HW structure
52  * @access: NVM access type (read or write)
53  *
54  * This function will request NVM ownership for reading
55  * via the proper Admin Command.
56  **/
57 int i40e_acquire_nvm(struct i40e_hw *hw,
58 		     enum i40e_aq_resource_access_type access)
59 {
60 	u64 gtime, timeout;
61 	u64 time_left = 0;
62 	int ret_code = 0;
63 
64 	if (hw->nvm.blank_nvm_mode)
65 		goto i40e_i40e_acquire_nvm_exit;
66 
67 	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
68 					    0, &time_left, NULL);
69 	/* Reading the Global Device Timer */
70 	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
71 
72 	/* Store the timeout */
73 	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
74 
75 	if (ret_code)
76 		i40e_debug(hw, I40E_DEBUG_NVM,
77 			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
78 			   access, time_left, ret_code, hw->aq.asq_last_status);
79 
80 	if (ret_code && time_left) {
81 		/* Poll until the current NVM owner timeouts */
82 		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
83 		while ((gtime < timeout) && time_left) {
84 			usleep_range(10000, 20000);
85 			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
86 			ret_code = i40e_aq_request_resource(hw,
87 							I40E_NVM_RESOURCE_ID,
88 							access, 0, &time_left,
89 							NULL);
90 			if (!ret_code) {
91 				hw->nvm.hw_semaphore_timeout =
92 					    I40E_MS_TO_GTIME(time_left) + gtime;
93 				break;
94 			}
95 		}
96 		if (ret_code) {
97 			hw->nvm.hw_semaphore_timeout = 0;
98 			i40e_debug(hw, I40E_DEBUG_NVM,
99 				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
100 				   time_left, ret_code, hw->aq.asq_last_status);
101 		}
102 	}
103 
104 i40e_i40e_acquire_nvm_exit:
105 	return ret_code;
106 }
107 
108 /**
109  * i40e_release_nvm - Generic request for releasing the NVM ownership
110  * @hw: pointer to the HW structure
111  *
112  * This function will release NVM resource via the proper Admin Command.
113  **/
114 void i40e_release_nvm(struct i40e_hw *hw)
115 {
116 	u32 total_delay = 0;
117 	int ret_code = 0;
118 
119 	if (hw->nvm.blank_nvm_mode)
120 		return;
121 
122 	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
123 
124 	/* there are some rare cases when trying to release the resource
125 	 * results in an admin Q timeout, so handle them correctly
126 	 */
127 	while ((ret_code == -EIO) &&
128 	       (total_delay < hw->aq.asq_cmd_timeout)) {
129 		usleep_range(1000, 2000);
130 		ret_code = i40e_aq_release_resource(hw,
131 						    I40E_NVM_RESOURCE_ID,
132 						    0, NULL);
133 		total_delay++;
134 	}
135 }
136 
137 /**
138  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
139  * @hw: pointer to the HW structure
140  *
141  * Polls the SRCTL Shadow RAM register done bit.
142  **/
143 static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
144 {
145 	int ret_code = -EIO;
146 	u32 srctl, wait_cnt;
147 
148 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
149 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
150 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
151 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
152 			ret_code = 0;
153 			break;
154 		}
155 		udelay(5);
156 	}
157 	if (ret_code == -EIO)
158 		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
159 	return ret_code;
160 }
161 
162 /**
163  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
164  * @hw: pointer to the HW structure
165  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
166  * @data: word read from the Shadow RAM
167  *
168  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
169  **/
170 static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
171 				    u16 *data)
172 {
173 	int ret_code = -EIO;
174 	u32 sr_reg;
175 
176 	if (offset >= hw->nvm.sr_size) {
177 		i40e_debug(hw, I40E_DEBUG_NVM,
178 			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
179 			   offset, hw->nvm.sr_size);
180 		ret_code = -EINVAL;
181 		goto read_nvm_exit;
182 	}
183 
184 	/* Poll the done bit first */
185 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
186 	if (!ret_code) {
187 		/* Write the address and start reading */
188 		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
189 			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
190 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
191 
192 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
193 		ret_code = i40e_poll_sr_srctl_done_bit(hw);
194 		if (!ret_code) {
195 			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
196 			*data = (u16)((sr_reg &
197 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
198 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
199 		}
200 	}
201 	if (ret_code)
202 		i40e_debug(hw, I40E_DEBUG_NVM,
203 			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
204 			   offset);
205 
206 read_nvm_exit:
207 	return ret_code;
208 }
209 
210 /**
211  * i40e_read_nvm_aq - Read Shadow RAM.
212  * @hw: pointer to the HW structure.
213  * @module_pointer: module pointer location in words from the NVM beginning
214  * @offset: offset in words from module start
215  * @words: number of words to read
216  * @data: buffer with words to read to the Shadow RAM
217  * @last_command: tells the AdminQ that this is the last command
218  *
219  * Reads a 16 bit words buffer to the Shadow RAM using the admin command.
220  **/
221 static int i40e_read_nvm_aq(struct i40e_hw *hw,
222 			    u8 module_pointer, u32 offset,
223 			    u16 words, void *data,
224 			    bool last_command)
225 {
226 	struct i40e_asq_cmd_details cmd_details;
227 	int ret_code = -EIO;
228 
229 	memset(&cmd_details, 0, sizeof(cmd_details));
230 	cmd_details.wb_desc = &hw->nvm_wb_desc;
231 
232 	/* Here we are checking the SR limit only for the flat memory model.
233 	 * We cannot do it for the module-based model, as we did not acquire
234 	 * the NVM resource yet (we cannot get the module pointer value).
235 	 * Firmware will check the module-based model.
236 	 */
237 	if ((offset + words) > hw->nvm.sr_size)
238 		i40e_debug(hw, I40E_DEBUG_NVM,
239 			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
240 			   (offset + words), hw->nvm.sr_size);
241 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
242 		/* We can read only up to 4KB (one sector), in one AQ write */
243 		i40e_debug(hw, I40E_DEBUG_NVM,
244 			   "NVM read fail error: tried to read %d words, limit is %d.\n",
245 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
246 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
247 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
248 		/* A single read cannot spread over two sectors */
249 		i40e_debug(hw, I40E_DEBUG_NVM,
250 			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
251 			   offset, words);
252 	else
253 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
254 					    2 * offset,  /*bytes*/
255 					    2 * words,   /*bytes*/
256 					    data, last_command, &cmd_details);
257 
258 	return ret_code;
259 }
260 
261 /**
262  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
263  * @hw: pointer to the HW structure
264  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
265  * @data: word read from the Shadow RAM
266  *
267  * Reads one 16 bit word from the Shadow RAM using the AdminQ
268  **/
269 static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
270 				 u16 *data)
271 {
272 	int ret_code = -EIO;
273 
274 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
275 	*data = le16_to_cpu(*(__le16 *)data);
276 
277 	return ret_code;
278 }
279 
280 /**
281  * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
282  * @hw: pointer to the HW structure
283  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
284  * @data: word read from the Shadow RAM
285  *
286  * Reads one 16 bit word from the Shadow RAM.
287  *
288  * Do not use this function except in cases where the nvm lock is already
289  * taken via i40e_acquire_nvm().
290  **/
291 static int __i40e_read_nvm_word(struct i40e_hw *hw,
292 				u16 offset, u16 *data)
293 {
294 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
295 		return i40e_read_nvm_word_aq(hw, offset, data);
296 
297 	return i40e_read_nvm_word_srctl(hw, offset, data);
298 }
299 
300 /**
301  * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
302  * @hw: pointer to the HW structure
303  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
304  * @data: word read from the Shadow RAM
305  *
306  * Reads one 16 bit word from the Shadow RAM.
307  **/
308 int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
309 		       u16 *data)
310 {
311 	int ret_code = 0;
312 
313 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
314 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
315 	if (ret_code)
316 		return ret_code;
317 
318 	ret_code = __i40e_read_nvm_word(hw, offset, data);
319 
320 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
321 		i40e_release_nvm(hw);
322 
323 	return ret_code;
324 }
325 
326 /**
327  * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
328  * @hw: Pointer to the HW structure
329  * @module_ptr: Pointer to module in words with respect to NVM beginning
330  * @module_offset: Offset in words from module start
331  * @data_offset: Offset in words from reading data area start
332  * @words_data_size: Words to read from NVM
333  * @data_ptr: Pointer to memory location where resulting buffer will be stored
334  **/
335 int i40e_read_nvm_module_data(struct i40e_hw *hw,
336 			      u8 module_ptr,
337 			      u16 module_offset,
338 			      u16 data_offset,
339 			      u16 words_data_size,
340 			      u16 *data_ptr)
341 {
342 	u16 specific_ptr = 0;
343 	u16 ptr_value = 0;
344 	u32 offset = 0;
345 	int status;
346 
347 	if (module_ptr != 0) {
348 		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
349 		if (status) {
350 			i40e_debug(hw, I40E_DEBUG_ALL,
351 				   "Reading nvm word failed.Error code: %d.\n",
352 				   status);
353 			return -EIO;
354 		}
355 	}
356 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
357 #define I40E_NVM_INVALID_VAL 0xFFFF
358 
359 	/* Pointer not initialized */
360 	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
361 	    ptr_value == I40E_NVM_INVALID_VAL) {
362 		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
363 		return -EINVAL;
364 	}
365 
366 	/* Check whether the module is in SR mapped area or outside */
367 	if (ptr_value & I40E_PTR_TYPE) {
368 		/* Pointer points outside of the Shared RAM mapped area */
369 		i40e_debug(hw, I40E_DEBUG_ALL,
370 			   "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
371 
372 		return -EINVAL;
373 	} else {
374 		/* Read from the Shadow RAM */
375 
376 		status = i40e_read_nvm_word(hw, ptr_value + module_offset,
377 					    &specific_ptr);
378 		if (status) {
379 			i40e_debug(hw, I40E_DEBUG_ALL,
380 				   "Reading nvm word failed.Error code: %d.\n",
381 				   status);
382 			return -EIO;
383 		}
384 
385 		offset = ptr_value + module_offset + specific_ptr +
386 			data_offset;
387 
388 		status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
389 					      data_ptr);
390 		if (status) {
391 			i40e_debug(hw, I40E_DEBUG_ALL,
392 				   "Reading nvm buffer failed.Error code: %d.\n",
393 				   status);
394 		}
395 	}
396 
397 	return status;
398 }
399 
400 /**
401  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
402  * @hw: pointer to the HW structure
403  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
404  * @words: (in) number of words to read; (out) number of words actually read
405  * @data: words read from the Shadow RAM
406  *
407  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
408  * method. The buffer read is preceded by the NVM ownership take
409  * and followed by the release.
410  **/
411 static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
412 				      u16 *words, u16 *data)
413 {
414 	int ret_code = 0;
415 	u16 index, word;
416 
417 	/* Loop thru the selected region */
418 	for (word = 0; word < *words; word++) {
419 		index = offset + word;
420 		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
421 		if (ret_code)
422 			break;
423 	}
424 
425 	/* Update the number of words read from the Shadow RAM */
426 	*words = word;
427 
428 	return ret_code;
429 }
430 
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
				   u16 *words, u16 *data)
{
	bool last_cmd = false;
	u16 words_read = 0;
	u16 read_size;
	int ret_code;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: read only up to the next sector
			 * boundary (can only happen on the first iteration,
			 * since offset is sector-aligned afterwards).
			 */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			/* Report the partial word count read so far */
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* The AQ returned little-endian words; convert the whole buffer
	 * to host byte order in place.
	 */
	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}
487 
488 /**
489  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
490  * @hw: pointer to the HW structure
491  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
492  * @words: (in) number of words to read; (out) number of words actually read
493  * @data: words read from the Shadow RAM
494  *
495  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
496  * method.
497  **/
498 static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
499 				  u16 offset, u16 *words,
500 				  u16 *data)
501 {
502 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
503 		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
504 
505 	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
506 }
507 
508 /**
509  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
510  * @hw: pointer to the HW structure
511  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
512  * @words: (in) number of words to read; (out) number of words actually read
513  * @data: words read from the Shadow RAM
514  *
515  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
516  * method. The buffer read is preceded by the NVM ownership take
517  * and followed by the release.
518  **/
519 int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
520 			 u16 *words, u16 *data)
521 {
522 	int ret_code = 0;
523 
524 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
525 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
526 		if (!ret_code) {
527 			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
528 							   data);
529 			i40e_release_nvm(hw);
530 		}
531 	} else {
532 		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
533 	}
534 
535 	return ret_code;
536 }
537 
538 /**
539  * i40e_write_nvm_aq - Writes Shadow RAM.
540  * @hw: pointer to the HW structure.
541  * @module_pointer: module pointer location in words from the NVM beginning
542  * @offset: offset in words from module start
543  * @words: number of words to write
544  * @data: buffer with words to write to the Shadow RAM
545  * @last_command: tells the AdminQ that this is the last command
546  *
547  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
548  **/
549 static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
550 			     u32 offset, u16 words, void *data,
551 			     bool last_command)
552 {
553 	struct i40e_asq_cmd_details cmd_details;
554 	int ret_code = -EIO;
555 
556 	memset(&cmd_details, 0, sizeof(cmd_details));
557 	cmd_details.wb_desc = &hw->nvm_wb_desc;
558 
559 	/* Here we are checking the SR limit only for the flat memory model.
560 	 * We cannot do it for the module-based model, as we did not acquire
561 	 * the NVM resource yet (we cannot get the module pointer value).
562 	 * Firmware will check the module-based model.
563 	 */
564 	if ((offset + words) > hw->nvm.sr_size)
565 		i40e_debug(hw, I40E_DEBUG_NVM,
566 			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
567 			   (offset + words), hw->nvm.sr_size);
568 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
569 		/* We can write only up to 4KB (one sector), in one AQ write */
570 		i40e_debug(hw, I40E_DEBUG_NVM,
571 			   "NVM write fail error: tried to write %d words, limit is %d.\n",
572 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
573 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
574 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
575 		/* A single write cannot spread over two sectors */
576 		i40e_debug(hw, I40E_DEBUG_NVM,
577 			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
578 			   offset, words);
579 	else
580 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
581 					      2 * offset,  /*bytes*/
582 					      2 * words,   /*bytes*/
583 					      data, last_command, 0,
584 					      &cmd_details);
585 
586 	return ret_code;
587 }
588 
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 *
 * Uses the lock-free __i40e_read_nvm_word()/__i40e_read_nvm_buffer() helpers,
 * so the caller is responsible for any NVM resource acquisition required.
 * Returns 0 on success, -EIO on any read failure, or the allocation error.
 **/
static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
				  u16 *checksum)
{
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	int ret_code;
	u16 *data;
	u16 i = 0;

	/* Scratch buffer holding one Shadow RAM sector at a time */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = -EIO;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = -EIO;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (refill the scratch buffer whenever the word
		 * index crosses a sector boundary)
		 */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = -EIO;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		/* Sum words using u16 wraparound arithmetic */
		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Stored checksum is the BASE constant minus the accumulated sum */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
671 
672 /**
673  * i40e_update_nvm_checksum - Updates the NVM checksum
674  * @hw: pointer to hardware structure
675  *
676  * NVM ownership must be acquired before calling this function and released
677  * on ARQ completion event reception by caller.
678  * This function will commit SR to NVM.
679  **/
680 int i40e_update_nvm_checksum(struct i40e_hw *hw)
681 {
682 	__le16 le_sum;
683 	int ret_code;
684 	u16 checksum;
685 
686 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
687 	if (!ret_code) {
688 		le_sum = cpu_to_le16(checksum);
689 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
690 					     1, &le_sum, true);
691 	}
692 
693 	return ret_code;
694 }
695 
696 /**
697  * i40e_validate_nvm_checksum - Validate EEPROM checksum
698  * @hw: pointer to hardware structure
699  * @checksum: calculated checksum
700  *
701  * Performs checksum calculation and validates the NVM SW checksum. If the
702  * caller does not need checksum, the value can be NULL.
703  **/
704 int i40e_validate_nvm_checksum(struct i40e_hw *hw,
705 			       u16 *checksum)
706 {
707 	u16 checksum_local = 0;
708 	u16 checksum_sr = 0;
709 	int ret_code = 0;
710 
711 	/* We must acquire the NVM lock in order to correctly synchronize the
712 	 * NVM accesses across multiple PFs. Without doing so it is possible
713 	 * for one of the PFs to read invalid data potentially indicating that
714 	 * the checksum is invalid.
715 	 */
716 	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
717 	if (ret_code)
718 		return ret_code;
719 	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
720 	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
721 	i40e_release_nvm(hw);
722 	if (ret_code)
723 		return ret_code;
724 
725 	/* Verify read checksum from EEPROM is the same as
726 	 * calculated checksum
727 	 */
728 	if (checksum_local != checksum_sr)
729 		ret_code = -EIO;
730 
731 	/* If the user cares, return the calculated checksum */
732 	if (checksum)
733 		*checksum = checksum_local;
734 
735 	return ret_code;
736 }
737 
/* Forward declarations for the NVM update (nvmupd) state machine helpers
 * defined later in this file.
 */
static int i40e_nvmupd_state_init(struct i40e_hw *hw,
				  struct i40e_nvm_access *cmd,
				  u8 *bytes, int *perrno);
static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno);
/* NOTE(review): the last parameter below is named "errno" rather than the
 * "perrno" used everywhere else; consider renaming for consistency.
 */
static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 int *perrno);
static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 u8 *bytes, int *perrno);
static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno);
static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
			       struct i40e_nvm_access *cmd,
			       u8 *bytes, int *perrno);
static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno);
static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
				    struct i40e_nvm_access *cmd,
				    u8 *bytes, int *perrno);
768 static inline u8 i40e_nvmupd_get_module(u32 val)
769 {
770 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
771 }
772 static inline u8 i40e_nvmupd_get_transaction(u32 val)
773 {
774 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
775 }
776 
777 static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
778 {
779 	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
780 		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
781 }
782 
/* Debug-log names for enum i40e_nvmupd_cmd values; indexed directly by the
 * command in i40e_nvmupd_command(), so the order must match the enum.
 */
static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
802 
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
int i40e_nvmupd_command(struct i40e_hw *hw,
			struct i40e_nvm_access *cmd,
			u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	int status;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* Invalid commands are reported via *perrno but still fall through
	 * to the STATUS check and the state machine below.
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return -EINVAL;
		}

		/* bytes[0]: current state machine state */
		bytes[0] = hw->nvmupd_state;

		/* bytes[2..3]: AQ opcode the state machine is waiting on */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = 0;
			break;
		}

		status = -EBUSY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = -EOPNOTSUPP;
		*perrno = -ESRCH;
		break;
	}

	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
917 
918 /**
919  * i40e_nvmupd_state_init - Handle NVM update state Init
920  * @hw: pointer to hardware structure
921  * @cmd: pointer to nvm update command buffer
922  * @bytes: pointer to the data buffer
923  * @perrno: pointer to return error code
924  *
925  * Process legitimate commands of the Init state and conditionally set next
926  * state. Reject all other commands.
927  **/
928 static int i40e_nvmupd_state_init(struct i40e_hw *hw,
929 				  struct i40e_nvm_access *cmd,
930 				  u8 *bytes, int *perrno)
931 {
932 	enum i40e_nvmupd_cmd upd_cmd;
933 	int status = 0;
934 
935 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
936 
937 	switch (upd_cmd) {
938 	case I40E_NVMUPD_READ_SA:
939 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
940 		if (status) {
941 			*perrno = i40e_aq_rc_to_posix(status,
942 						     hw->aq.asq_last_status);
943 		} else {
944 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
945 			i40e_release_nvm(hw);
946 		}
947 		break;
948 
949 	case I40E_NVMUPD_READ_SNT:
950 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
951 		if (status) {
952 			*perrno = i40e_aq_rc_to_posix(status,
953 						     hw->aq.asq_last_status);
954 		} else {
955 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
956 			if (status)
957 				i40e_release_nvm(hw);
958 			else
959 				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
960 		}
961 		break;
962 
963 	case I40E_NVMUPD_WRITE_ERA:
964 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
965 		if (status) {
966 			*perrno = i40e_aq_rc_to_posix(status,
967 						     hw->aq.asq_last_status);
968 		} else {
969 			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
970 			if (status) {
971 				i40e_release_nvm(hw);
972 			} else {
973 				hw->nvm_release_on_done = true;
974 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
975 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
976 			}
977 		}
978 		break;
979 
980 	case I40E_NVMUPD_WRITE_SA:
981 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
982 		if (status) {
983 			*perrno = i40e_aq_rc_to_posix(status,
984 						     hw->aq.asq_last_status);
985 		} else {
986 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
987 			if (status) {
988 				i40e_release_nvm(hw);
989 			} else {
990 				hw->nvm_release_on_done = true;
991 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
992 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
993 			}
994 		}
995 		break;
996 
997 	case I40E_NVMUPD_WRITE_SNT:
998 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
999 		if (status) {
1000 			*perrno = i40e_aq_rc_to_posix(status,
1001 						     hw->aq.asq_last_status);
1002 		} else {
1003 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1004 			if (status) {
1005 				i40e_release_nvm(hw);
1006 			} else {
1007 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1008 				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1009 			}
1010 		}
1011 		break;
1012 
1013 	case I40E_NVMUPD_CSUM_SA:
1014 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1015 		if (status) {
1016 			*perrno = i40e_aq_rc_to_posix(status,
1017 						     hw->aq.asq_last_status);
1018 		} else {
1019 			status = i40e_update_nvm_checksum(hw);
1020 			if (status) {
1021 				*perrno = hw->aq.asq_last_status ?
1022 				   i40e_aq_rc_to_posix(status,
1023 						       hw->aq.asq_last_status) :
1024 				   -EIO;
1025 				i40e_release_nvm(hw);
1026 			} else {
1027 				hw->nvm_release_on_done = true;
1028 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1029 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1030 			}
1031 		}
1032 		break;
1033 
1034 	case I40E_NVMUPD_EXEC_AQ:
1035 		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1036 		break;
1037 
1038 	case I40E_NVMUPD_GET_AQ_RESULT:
1039 		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1040 		break;
1041 
1042 	case I40E_NVMUPD_GET_AQ_EVENT:
1043 		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
1044 		break;
1045 
1046 	default:
1047 		i40e_debug(hw, I40E_DEBUG_NVM,
1048 			   "NVMUPD: bad cmd %s in init state\n",
1049 			   i40e_nvm_update_state_str[upd_cmd]);
1050 		status = -EIO;
1051 		*perrno = -ESRCH;
1052 		break;
1053 	}
1054 	return status;
1055 }
1056 
1057 /**
1058  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1059  * @hw: pointer to hardware structure
1060  * @cmd: pointer to nvm update command buffer
1061  * @bytes: pointer to the data buffer
1062  * @perrno: pointer to return error code
1063  *
1064  * NVM ownership is already held.  Process legitimate commands and set any
1065  * change in state; reject all other commands.
1066  **/
1067 static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
1068 				     struct i40e_nvm_access *cmd,
1069 				     u8 *bytes, int *perrno)
1070 {
1071 	enum i40e_nvmupd_cmd upd_cmd;
1072 	int status = 0;
1073 
1074 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1075 
1076 	switch (upd_cmd) {
1077 	case I40E_NVMUPD_READ_SA:
1078 	case I40E_NVMUPD_READ_CON:
1079 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1080 		break;
1081 
1082 	case I40E_NVMUPD_READ_LCB:
1083 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1084 		i40e_release_nvm(hw);
1085 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1086 		break;
1087 
1088 	default:
1089 		i40e_debug(hw, I40E_DEBUG_NVM,
1090 			   "NVMUPD: bad cmd %s in reading state.\n",
1091 			   i40e_nvm_update_state_str[upd_cmd]);
1092 		status = -EOPNOTSUPP;
1093 		*perrno = -ESRCH;
1094 		break;
1095 	}
1096 	return status;
1097 }
1098 
1099 /**
1100  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1101  * @hw: pointer to hardware structure
1102  * @cmd: pointer to nvm update command buffer
1103  * @bytes: pointer to the data buffer
1104  * @perrno: pointer to return error code
1105  *
1106  * NVM ownership is already held.  Process legitimate commands and set any
1107  * change in state; reject all other commands
1108  **/
1109 static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
1110 				     struct i40e_nvm_access *cmd,
1111 				     u8 *bytes, int *perrno)
1112 {
1113 	enum i40e_nvmupd_cmd upd_cmd;
1114 	bool retry_attempt = false;
1115 	int status = 0;
1116 
1117 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1118 
1119 retry:
1120 	switch (upd_cmd) {
1121 	case I40E_NVMUPD_WRITE_CON:
1122 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1123 		if (!status) {
1124 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1125 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1126 		}
1127 		break;
1128 
1129 	case I40E_NVMUPD_WRITE_LCB:
1130 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1131 		if (status) {
1132 			*perrno = hw->aq.asq_last_status ?
1133 				   i40e_aq_rc_to_posix(status,
1134 						       hw->aq.asq_last_status) :
1135 				   -EIO;
1136 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1137 		} else {
1138 			hw->nvm_release_on_done = true;
1139 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1140 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1141 		}
1142 		break;
1143 
1144 	case I40E_NVMUPD_CSUM_CON:
1145 		/* Assumes the caller has acquired the nvm */
1146 		status = i40e_update_nvm_checksum(hw);
1147 		if (status) {
1148 			*perrno = hw->aq.asq_last_status ?
1149 				   i40e_aq_rc_to_posix(status,
1150 						       hw->aq.asq_last_status) :
1151 				   -EIO;
1152 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1153 		} else {
1154 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1155 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1156 		}
1157 		break;
1158 
1159 	case I40E_NVMUPD_CSUM_LCB:
1160 		/* Assumes the caller has acquired the nvm */
1161 		status = i40e_update_nvm_checksum(hw);
1162 		if (status) {
1163 			*perrno = hw->aq.asq_last_status ?
1164 				   i40e_aq_rc_to_posix(status,
1165 						       hw->aq.asq_last_status) :
1166 				   -EIO;
1167 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1168 		} else {
1169 			hw->nvm_release_on_done = true;
1170 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1171 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1172 		}
1173 		break;
1174 
1175 	default:
1176 		i40e_debug(hw, I40E_DEBUG_NVM,
1177 			   "NVMUPD: bad cmd %s in writing state.\n",
1178 			   i40e_nvm_update_state_str[upd_cmd]);
1179 		status = -EOPNOTSUPP;
1180 		*perrno = -ESRCH;
1181 		break;
1182 	}
1183 
1184 	/* In some circumstances, a multi-write transaction takes longer
1185 	 * than the default 3 minute timeout on the write semaphore.  If
1186 	 * the write failed with an EBUSY status, this is likely the problem,
1187 	 * so here we try to reacquire the semaphore then retry the write.
1188 	 * We only do one retry, then give up.
1189 	 */
1190 	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1191 	    !retry_attempt) {
1192 		u32 old_asq_status = hw->aq.asq_last_status;
1193 		int old_status = status;
1194 		u32 gtime;
1195 
1196 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1197 		if (gtime >= hw->nvm.hw_semaphore_timeout) {
1198 			i40e_debug(hw, I40E_DEBUG_ALL,
1199 				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1200 				   gtime, hw->nvm.hw_semaphore_timeout);
1201 			i40e_release_nvm(hw);
1202 			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1203 			if (status) {
1204 				i40e_debug(hw, I40E_DEBUG_ALL,
1205 					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1206 					   hw->aq.asq_last_status);
1207 				status = old_status;
1208 				hw->aq.asq_last_status = old_asq_status;
1209 			} else {
1210 				retry_attempt = true;
1211 				goto retry;
1212 			}
1213 		}
1214 	}
1215 
1216 	return status;
1217 }
1218 
1219 /**
1220  * i40e_nvmupd_clear_wait_state - clear wait state on hw
1221  * @hw: pointer to the hardware structure
1222  **/
1223 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1224 {
1225 	i40e_debug(hw, I40E_DEBUG_NVM,
1226 		   "NVMUPD: clearing wait on opcode 0x%04x\n",
1227 		   hw->nvm_wait_opcode);
1228 
1229 	if (hw->nvm_release_on_done) {
1230 		i40e_release_nvm(hw);
1231 		hw->nvm_release_on_done = false;
1232 	}
1233 	hw->nvm_wait_opcode = 0;
1234 
1235 	if (hw->aq.arq_last_status) {
1236 		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1237 		return;
1238 	}
1239 
1240 	switch (hw->nvmupd_state) {
1241 	case I40E_NVMUPD_STATE_INIT_WAIT:
1242 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1243 		break;
1244 
1245 	case I40E_NVMUPD_STATE_WRITE_WAIT:
1246 		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1247 		break;
1248 
1249 	default:
1250 		break;
1251 	}
1252 }
1253 
1254 /**
1255  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1256  * @hw: pointer to the hardware structure
1257  * @opcode: the event that just happened
1258  * @desc: AdminQ descriptor
1259  **/
1260 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1261 				  struct i40e_aq_desc *desc)
1262 {
1263 	u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1264 
1265 	if (opcode == hw->nvm_wait_opcode) {
1266 		memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
1267 		i40e_nvmupd_clear_wait_state(hw);
1268 	}
1269 }
1270 
1271 /**
1272  * i40e_nvmupd_validate_command - Validate given command
1273  * @hw: pointer to hardware structure
1274  * @cmd: pointer to nvm update command buffer
1275  * @perrno: pointer to return error code
1276  *
1277  * Return one of the valid command types or I40E_NVMUPD_INVALID
1278  **/
1279 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1280 						 struct i40e_nvm_access *cmd,
1281 						 int *perrno)
1282 {
1283 	enum i40e_nvmupd_cmd upd_cmd;
1284 	u8 module, transaction;
1285 
1286 	/* anything that doesn't match a recognized case is an error */
1287 	upd_cmd = I40E_NVMUPD_INVALID;
1288 
1289 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1290 	module = i40e_nvmupd_get_module(cmd->config);
1291 
1292 	/* limits on data size */
1293 	if ((cmd->data_size < 1) ||
1294 	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1295 		i40e_debug(hw, I40E_DEBUG_NVM,
1296 			   "i40e_nvmupd_validate_command data_size %d\n",
1297 			   cmd->data_size);
1298 		*perrno = -EFAULT;
1299 		return I40E_NVMUPD_INVALID;
1300 	}
1301 
1302 	switch (cmd->command) {
1303 	case I40E_NVM_READ:
1304 		switch (transaction) {
1305 		case I40E_NVM_CON:
1306 			upd_cmd = I40E_NVMUPD_READ_CON;
1307 			break;
1308 		case I40E_NVM_SNT:
1309 			upd_cmd = I40E_NVMUPD_READ_SNT;
1310 			break;
1311 		case I40E_NVM_LCB:
1312 			upd_cmd = I40E_NVMUPD_READ_LCB;
1313 			break;
1314 		case I40E_NVM_SA:
1315 			upd_cmd = I40E_NVMUPD_READ_SA;
1316 			break;
1317 		case I40E_NVM_EXEC:
1318 			if (module == 0xf)
1319 				upd_cmd = I40E_NVMUPD_STATUS;
1320 			else if (module == 0)
1321 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1322 			break;
1323 		case I40E_NVM_AQE:
1324 			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1325 			break;
1326 		}
1327 		break;
1328 
1329 	case I40E_NVM_WRITE:
1330 		switch (transaction) {
1331 		case I40E_NVM_CON:
1332 			upd_cmd = I40E_NVMUPD_WRITE_CON;
1333 			break;
1334 		case I40E_NVM_SNT:
1335 			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1336 			break;
1337 		case I40E_NVM_LCB:
1338 			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1339 			break;
1340 		case I40E_NVM_SA:
1341 			upd_cmd = I40E_NVMUPD_WRITE_SA;
1342 			break;
1343 		case I40E_NVM_ERA:
1344 			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1345 			break;
1346 		case I40E_NVM_CSUM:
1347 			upd_cmd = I40E_NVMUPD_CSUM_CON;
1348 			break;
1349 		case (I40E_NVM_CSUM|I40E_NVM_SA):
1350 			upd_cmd = I40E_NVMUPD_CSUM_SA;
1351 			break;
1352 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1353 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1354 			break;
1355 		case I40E_NVM_EXEC:
1356 			if (module == 0)
1357 				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1358 			break;
1359 		}
1360 		break;
1361 	}
1362 
1363 	return upd_cmd;
1364 }
1365 
1366 /**
1367  * i40e_nvmupd_exec_aq - Run an AQ command
1368  * @hw: pointer to hardware structure
1369  * @cmd: pointer to nvm update command buffer
1370  * @bytes: pointer to the data buffer
1371  * @perrno: pointer to return error code
1372  *
1373  * cmd structure contains identifiers and data buffer
1374  **/
1375 static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1376 			       struct i40e_nvm_access *cmd,
1377 			       u8 *bytes, int *perrno)
1378 {
1379 	struct i40e_asq_cmd_details cmd_details;
1380 	struct i40e_aq_desc *aq_desc;
1381 	u32 buff_size = 0;
1382 	u8 *buff = NULL;
1383 	u32 aq_desc_len;
1384 	u32 aq_data_len;
1385 	int status;
1386 
1387 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1388 	if (cmd->offset == 0xffff)
1389 		return 0;
1390 
1391 	memset(&cmd_details, 0, sizeof(cmd_details));
1392 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1393 
1394 	aq_desc_len = sizeof(struct i40e_aq_desc);
1395 	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1396 
1397 	/* get the aq descriptor */
1398 	if (cmd->data_size < aq_desc_len) {
1399 		i40e_debug(hw, I40E_DEBUG_NVM,
1400 			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1401 			   cmd->data_size, aq_desc_len);
1402 		*perrno = -EINVAL;
1403 		return -EINVAL;
1404 	}
1405 	aq_desc = (struct i40e_aq_desc *)bytes;
1406 
1407 	/* if data buffer needed, make sure it's ready */
1408 	aq_data_len = cmd->data_size - aq_desc_len;
1409 	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
1410 	if (buff_size) {
1411 		if (!hw->nvm_buff.va) {
1412 			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1413 							hw->aq.asq_buf_size);
1414 			if (status)
1415 				i40e_debug(hw, I40E_DEBUG_NVM,
1416 					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1417 					   status);
1418 		}
1419 
1420 		if (hw->nvm_buff.va) {
1421 			buff = hw->nvm_buff.va;
1422 			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1423 		}
1424 	}
1425 
1426 	if (cmd->offset)
1427 		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1428 
1429 	/* and away we go! */
1430 	status = i40e_asq_send_command(hw, aq_desc, buff,
1431 				       buff_size, &cmd_details);
1432 	if (status) {
1433 		i40e_debug(hw, I40E_DEBUG_NVM,
1434 			   "%s err %pe aq_err %s\n",
1435 			   __func__, ERR_PTR(status),
1436 			   i40e_aq_str(hw, hw->aq.asq_last_status));
1437 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1438 		return status;
1439 	}
1440 
1441 	/* should we wait for a followup event? */
1442 	if (cmd->offset) {
1443 		hw->nvm_wait_opcode = cmd->offset;
1444 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1445 	}
1446 
1447 	return status;
1448 }
1449 
1450 /**
1451  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1452  * @hw: pointer to hardware structure
1453  * @cmd: pointer to nvm update command buffer
1454  * @bytes: pointer to the data buffer
1455  * @perrno: pointer to return error code
1456  *
1457  * cmd structure contains identifiers and data buffer
1458  **/
1459 static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1460 				     struct i40e_nvm_access *cmd,
1461 				     u8 *bytes, int *perrno)
1462 {
1463 	u32 aq_total_len;
1464 	u32 aq_desc_len;
1465 	int remainder;
1466 	u8 *buff;
1467 
1468 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1469 
1470 	aq_desc_len = sizeof(struct i40e_aq_desc);
1471 	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
1472 
1473 	/* check offset range */
1474 	if (cmd->offset > aq_total_len) {
1475 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1476 			   __func__, cmd->offset, aq_total_len);
1477 		*perrno = -EINVAL;
1478 		return -EINVAL;
1479 	}
1480 
1481 	/* check copylength range */
1482 	if (cmd->data_size > (aq_total_len - cmd->offset)) {
1483 		int new_len = aq_total_len - cmd->offset;
1484 
1485 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1486 			   __func__, cmd->data_size, new_len);
1487 		cmd->data_size = new_len;
1488 	}
1489 
1490 	remainder = cmd->data_size;
1491 	if (cmd->offset < aq_desc_len) {
1492 		u32 len = aq_desc_len - cmd->offset;
1493 
1494 		len = min(len, cmd->data_size);
1495 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1496 			   __func__, cmd->offset, cmd->offset + len);
1497 
1498 		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1499 		memcpy(bytes, buff, len);
1500 
1501 		bytes += len;
1502 		remainder -= len;
1503 		buff = hw->nvm_buff.va;
1504 	} else {
1505 		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1506 	}
1507 
1508 	if (remainder > 0) {
1509 		int start_byte = buff - (u8 *)hw->nvm_buff.va;
1510 
1511 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1512 			   __func__, start_byte, start_byte + remainder);
1513 		memcpy(bytes, buff, remainder);
1514 	}
1515 
1516 	return 0;
1517 }
1518 
1519 /**
1520  * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1521  * @hw: pointer to hardware structure
1522  * @cmd: pointer to nvm update command buffer
1523  * @bytes: pointer to the data buffer
1524  * @perrno: pointer to return error code
1525  *
1526  * cmd structure contains identifiers and data buffer
1527  **/
1528 static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1529 				    struct i40e_nvm_access *cmd,
1530 				    u8 *bytes, int *perrno)
1531 {
1532 	u32 aq_total_len;
1533 	u32 aq_desc_len;
1534 
1535 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1536 
1537 	aq_desc_len = sizeof(struct i40e_aq_desc);
1538 	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
1539 
1540 	/* check copylength range */
1541 	if (cmd->data_size > aq_total_len) {
1542 		i40e_debug(hw, I40E_DEBUG_NVM,
1543 			   "%s: copy length %d too big, trimming to %d\n",
1544 			   __func__, cmd->data_size, aq_total_len);
1545 		cmd->data_size = aq_total_len;
1546 	}
1547 
1548 	memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
1549 
1550 	return 0;
1551 }
1552 
1553 /**
1554  * i40e_nvmupd_nvm_read - Read NVM
1555  * @hw: pointer to hardware structure
1556  * @cmd: pointer to nvm update command buffer
1557  * @bytes: pointer to the data buffer
1558  * @perrno: pointer to return error code
1559  *
1560  * cmd structure contains identifiers and data buffer
1561  **/
1562 static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1563 				struct i40e_nvm_access *cmd,
1564 				u8 *bytes, int *perrno)
1565 {
1566 	struct i40e_asq_cmd_details cmd_details;
1567 	u8 module, transaction;
1568 	int status;
1569 	bool last;
1570 
1571 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1572 	module = i40e_nvmupd_get_module(cmd->config);
1573 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1574 
1575 	memset(&cmd_details, 0, sizeof(cmd_details));
1576 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1577 
1578 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1579 				  bytes, last, &cmd_details);
1580 	if (status) {
1581 		i40e_debug(hw, I40E_DEBUG_NVM,
1582 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1583 			   module, cmd->offset, cmd->data_size);
1584 		i40e_debug(hw, I40E_DEBUG_NVM,
1585 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1586 			   status, hw->aq.asq_last_status);
1587 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1588 	}
1589 
1590 	return status;
1591 }
1592 
1593 /**
1594  * i40e_nvmupd_nvm_erase - Erase an NVM module
1595  * @hw: pointer to hardware structure
1596  * @cmd: pointer to nvm update command buffer
1597  * @perrno: pointer to return error code
1598  *
1599  * module, offset, data_size and data are in cmd structure
1600  **/
1601 static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1602 				 struct i40e_nvm_access *cmd,
1603 				 int *perrno)
1604 {
1605 	struct i40e_asq_cmd_details cmd_details;
1606 	u8 module, transaction;
1607 	int status = 0;
1608 	bool last;
1609 
1610 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1611 	module = i40e_nvmupd_get_module(cmd->config);
1612 	last = (transaction & I40E_NVM_LCB);
1613 
1614 	memset(&cmd_details, 0, sizeof(cmd_details));
1615 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1616 
1617 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1618 				   last, &cmd_details);
1619 	if (status) {
1620 		i40e_debug(hw, I40E_DEBUG_NVM,
1621 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1622 			   module, cmd->offset, cmd->data_size);
1623 		i40e_debug(hw, I40E_DEBUG_NVM,
1624 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1625 			   status, hw->aq.asq_last_status);
1626 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1627 	}
1628 
1629 	return status;
1630 }
1631 
1632 /**
1633  * i40e_nvmupd_nvm_write - Write NVM
1634  * @hw: pointer to hardware structure
1635  * @cmd: pointer to nvm update command buffer
1636  * @bytes: pointer to the data buffer
1637  * @perrno: pointer to return error code
1638  *
1639  * module, offset, data_size and data are in cmd structure
1640  **/
1641 static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1642 				 struct i40e_nvm_access *cmd,
1643 				 u8 *bytes, int *perrno)
1644 {
1645 	struct i40e_asq_cmd_details cmd_details;
1646 	u8 module, transaction;
1647 	u8 preservation_flags;
1648 	int status = 0;
1649 	bool last;
1650 
1651 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1652 	module = i40e_nvmupd_get_module(cmd->config);
1653 	last = (transaction & I40E_NVM_LCB);
1654 	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1655 
1656 	memset(&cmd_details, 0, sizeof(cmd_details));
1657 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1658 
1659 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1660 				    (u16)cmd->data_size, bytes, last,
1661 				    preservation_flags, &cmd_details);
1662 	if (status) {
1663 		i40e_debug(hw, I40E_DEBUG_NVM,
1664 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1665 			   module, cmd->offset, cmd->data_size);
1666 		i40e_debug(hw, I40E_DEBUG_NVM,
1667 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1668 			   status, hw->aq.asq_last_status);
1669 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1670 	}
1671 
1672 	return status;
1673 }
1674