xref: /titanic_44/usr/src/uts/common/io/i40e/core/i40e_nvm.c (revision 8f179fb3a98d55af8ab2edfe08bf5669ca1f9d30)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "i40e_prototype.h"
36 
37 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 					       u16 *data);
39 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 					    u16 *data);
41 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
42 						 u16 *words, u16 *data);
43 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
44 					      u16 *words, u16 *data);
45 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
46 				       u32 offset, u16 words, void *data,
47 				       bool last_command);
48 
49 /**
50  * i40e_init_nvm_ops - Initialize NVM function pointers
51  * @hw: pointer to the HW structure
52  *
53  * Setup the function pointers and the NVM info structure. Should be called
54  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
55  * Please notice that the NVM term is used here (& in all methods covered
56  * in this file) as an equivalent of the FLASH part mapped into the SR.
57  * We are accessing FLASH always through the Shadow RAM.
58  **/
i40e_init_nvm(struct i40e_hw * hw)59 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 {
61 	struct i40e_nvm_info *nvm = &hw->nvm;
62 	enum i40e_status_code ret_code = I40E_SUCCESS;
63 	u32 fla, gens;
64 	u8 sr_size;
65 
66 	DEBUGFUNC("i40e_init_nvm");
67 
68 	/* The SR size is stored regardless of the nvm programming mode
69 	 * as the blank mode may be used in the factory line.
70 	 */
71 	gens = rd32(hw, I40E_GLNVM_GENS);
72 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
73 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
74 	/* Switching to words (sr_size contains power of 2KB) */
75 	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
76 
77 	/* Check if we are in the normal or blank NVM programming mode */
78 	fla = rd32(hw, I40E_GLNVM_FLA);
79 	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80 		/* Max NVM timeout */
81 		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
82 		nvm->blank_nvm_mode = FALSE;
83 	} else { /* Blank programming mode */
84 		nvm->blank_nvm_mode = TRUE;
85 		ret_code = I40E_ERR_NVM_BLANK_MODE;
86 		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
87 	}
88 
89 	return ret_code;
90 }
91 
92 /**
93  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
94  * @hw: pointer to the HW structure
95  * @access: NVM access type (read or write)
96  *
97  * This function will request NVM ownership for reading
98  * via the proper Admin Command.
99  **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 gtime, timeout;
	u64 time_left = 0;	/* ms remaining on the current owner's hold */

	DEBUGFUNC("i40e_acquire_nvm");

	/* In blank programming mode there is no ownership arbitration */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout (expiry expressed in GTIME ticks) */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	/* If another owner holds the NVM, time_left tells how long it may
	 * still hold it; poll until it releases or the max timeout expires.
	 */
	if (ret_code && time_left) {
		/* Poll until the current NVM owner timeouts */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			i40e_msec_delay(10);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (ret_code == I40E_SUCCESS) {
				/* Acquired: record our own expiry time */
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code != I40E_SUCCESS) {
			/* Give up; clear the expiry so callers retry fresh */
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
152 
153 /**
154  * i40e_release_nvm - Generic request for releasing the NVM ownership
155  * @hw: pointer to the HW structure
156  *
157  * This function will release NVM resource via the proper Admin Command.
158  **/
i40e_release_nvm(struct i40e_hw * hw)159 void i40e_release_nvm(struct i40e_hw *hw)
160 {
161 	enum i40e_status_code ret_code = I40E_SUCCESS;
162 	u32 total_delay = 0;
163 
164 	DEBUGFUNC("i40e_release_nvm");
165 
166 	if (hw->nvm.blank_nvm_mode)
167 		return;
168 
169 	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170 
171 	/* there are some rare cases when trying to release the resource
172 	 * results in an admin Q timeout, so handle them correctly
173 	 */
174 	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
175 	       (total_delay < hw->aq.asq_cmd_timeout)) {
176 			i40e_msec_delay(1);
177 			ret_code = i40e_aq_release_resource(hw,
178 						I40E_NVM_RESOURCE_ID, 0, NULL);
179 			total_delay++;
180 	}
181 }
182 
183 /**
184  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
185  * @hw: pointer to the HW structure
186  *
187  * Polls the SRCTL Shadow RAM register done bit.
188  **/
i40e_poll_sr_srctl_done_bit(struct i40e_hw * hw)189 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190 {
191 	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
192 	u32 srctl, wait_cnt;
193 
194 	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195 
196 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
197 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
198 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
199 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
200 			ret_code = I40E_SUCCESS;
201 			break;
202 		}
203 		i40e_usec_delay(5);
204 	}
205 	if (ret_code == I40E_ERR_TIMEOUT)
206 		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
207 	return ret_code;
208 }
209 
210 /**
211  * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
212  * @hw: pointer to the HW structure
213  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
214  * @data: word read from the Shadow RAM
215  *
216  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
217  **/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	enum i40e_status_code status;

	/* Legacy parts use the SRCTL register directly; no lock needed */
	if (!(hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE))
		return i40e_read_nvm_word_srctl(hw, offset, data);

	/* AQ-based access requires NVM ownership around the read */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status == I40E_SUCCESS) {
		status = i40e_read_nvm_word_aq(hw, offset, data);
		i40e_release_nvm(hw);
	}
	return status;
}
234 
235 /**
236  * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
237  * @hw: pointer to the HW structure
238  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
239  * @data: word read from the Shadow RAM
240  *
241  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
242  **/
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
					   u16 offset,
					   u16 *data)
{
	/* Caller already holds the NVM resource where required; simply
	 * dispatch to the appropriate low-level single-word read.
	 */
	return (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) ?
	    i40e_read_nvm_word_aq(hw, offset, data) :
	    i40e_read_nvm_word_srctl(hw, offset, data);
}
255 
256 /**
257  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
258  * @hw: pointer to the HW structure
259  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
260  * @data: word read from the Shadow RAM
261  *
262  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
263  **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	/* Reject offsets past the end of the Shadow RAM */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first (a previous access may still be pending) */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (ret_code == I40E_SUCCESS) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (ret_code == I40E_SUCCESS) {
			/* Extract the 16-bit data field from SRDATA */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}
305 
306 /**
307  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
308  * @hw: pointer to the HW structure
309  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
310  * @data: word read from the Shadow RAM
311  *
312  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
313  **/
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	enum i40e_status_code status;

	DEBUGFUNC("i40e_read_nvm_word_aq");

	/* Flat (module pointer 0) single-word read; mark as last command */
	status = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
	/* AQ delivers little-endian data; convert to host byte order */
	*data = LE16_TO_CPU(*(__le16 *)data);

	return status;
}
326 
327 /**
328  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
329  * @hw: pointer to the HW structure
330  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
331  * @words: (in) number of words to read; (out) number of words actually read
332  * @data: words read from the Shadow RAM
333  *
334  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
335  * method. The buffer read is preceded by the NVM ownership take
336  * and followed by the release.
337  **/
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
					     u16 offset,
					     u16 *words, u16 *data)
{
	/* Caller holds the NVM resource where required; dispatch to the
	 * appropriate low-level buffer read.
	 */
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
350 
351 /**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
353  * @hw: pointer to the HW structure
354  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
355  * @words: (in) number of words to read; (out) number of words actually read
356  * @data: words read from the Shadow RAM
357  *
358  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
359  * method. The buffer read is preceded by the NVM ownership take
360  * and followed by the release.
361  **/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	enum i40e_status_code status;

	/* Legacy parts read via the SRCTL register; no lock needed */
	if (!(hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE))
		return i40e_read_nvm_buffer_srctl(hw, offset, words, data);

	/* AQ-based access requires NVM ownership around the transfer */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status == I40E_SUCCESS) {
		status = i40e_read_nvm_buffer_aq(hw, offset, words, data);
		i40e_release_nvm(hw);
	}
	return status;
}
379 
380 /**
381  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
382  * @hw: pointer to the HW structure
383  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
384  * @words: (in) number of words to read; (out) number of words actually read
385  * @data: words read from the Shadow RAM
386  *
387  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
388  * method. The buffer read is preceded by the NVM ownership take
389  * and followed by the release.
390  **/
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
						 u16 *words, u16 *data)
{
	enum i40e_status_code status = I40E_SUCCESS;
	u16 count = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_srctl");

	/* Read one word at a time via SRCTL, stopping at the first error */
	while (count < *words) {
		status = i40e_read_nvm_word_srctl(hw, offset + count,
						  &data[count]);
		if (status != I40E_SUCCESS)
			break;
		count++;
	}

	/* Report how many words were actually transferred */
	*words = count;

	return status;
}
412 
413 /**
414  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
415  * @hw: pointer to the HW structure
416  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
417  * @words: (in) number of words to read; (out) number of words actually read
418  * @data: words read from the Shadow RAM
419  *
420  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
421  * method. The buffer read is preceded by the NVM ownership take
422  * and followed by the release.
423  **/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = FALSE;
	u16 words_read = 0;	/* running total of words transferred */
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: read only up to the next sector
			 * boundary (can only happen on the first iteration,
			 * since offset becomes aligned afterwards)
			 */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = TRUE;

		/* Module pointer 0x0 means a flat Shadow RAM read */
		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ delivers little-endian data; convert the buffer in place */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* Report the number of words actually read (partial on failure) */
	*words = words_read;
	return ret_code;
}
471 
472 /**
473  * i40e_read_nvm_aq - Read Shadow RAM.
474  * @hw: pointer to the HW structure.
475  * @module_pointer: module pointer location in words from the NVM beginning
476  * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to store the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
482  **/
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				       u32 offset, u16 words, void *data,
				       bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_read_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	/* Debug messages below previously said "write"; this is the read
	 * path, copy-pasted from i40e_write_nvm_aq - fixed to say "read".
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector), in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}
523 
524 /**
525  * i40e_write_nvm_aq - Writes Shadow RAM.
526  * @hw: pointer to the HW structure.
527  * @module_pointer: module pointer location in words from the NVM beginning
528  * @offset: offset in words from module start
529  * @words: number of words to write
530  * @data: buffer with words to write to the Shadow RAM
531  * @last_command: tells the AdminQ that this is the last command
532  *
533  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
534  **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* The SR limit can be validated here only for the flat memory
	 * model; module-based offsets are left to firmware, because the
	 * module pointer is unknown before the NVM resource is acquired.
	 */
	if ((offset + words) > hw->nvm.sr_size) {
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
		return I40E_ERR_NVM;
	}

	/* One AQ write may carry at most a single 4KB sector */
	if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) {
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
		return I40E_ERR_NVM;
	}

	/* A single write must not cross a sector boundary */
	if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) !=
	    (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) {
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
		return I40E_ERR_NVM;
	}

	/* Offsets and lengths are expressed in bytes on the admin queue */
	return i40e_aq_update_nvm(hw, module_pointer,
				  2 * offset,  /*bytes*/
				  2 * words,   /*bytes*/
				  data, last_command, &cmd_details);
}
569 
570 /**
571  * __i40e_write_nvm_word - Writes Shadow RAM word
572  * @hw: pointer to the HW structure
573  * @offset: offset of the Shadow RAM word to write
574  * @data: word to write to the Shadow RAM
575  *
576  * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
577  * NVM ownership have to be acquired and released (on ARQ completion event
578  * reception) by caller. To commit SR to NVM update checksum function
579  * should be called.
580  **/
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
					    void *data)
{
	u16 *word = (u16 *)data;

	DEBUGFUNC("i40e_write_nvm_word");

	/* Convert the word in place to little-endian wire order */
	*((__le16 *)data) = CPU_TO_LE16(*word);

	/* Module pointer 0x00 means the SR is treated as flat memory */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
}
591 
592 /**
593  * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
594  * @hw: pointer to the HW structure
595  * @module_pointer: module pointer location in words from the NVM beginning
596  * @offset: offset of the Shadow RAM buffer to write
597  * @words: number of words to write
598  * @data: words to write to the Shadow RAM
599  *
600  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
601  * NVM ownership must be acquired before calling this function and released
602  * on ARQ completion event reception by caller. To commit SR to NVM update
603  * checksum function should be called.
604  **/
enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
					      u8 module_pointer, u32 offset,
					      u16 words, void *data)
{
	__le16 *le_buf = (__le16 *)data;
	u16 *host_buf = (u16 *)data;
	u32 w;

	DEBUGFUNC("i40e_write_nvm_buffer");

	/* Byte-swap the buffer in place to little-endian wire order */
	for (w = 0; w < words; w++)
		le_buf[w] = CPU_TO_LE16(host_buf[w]);

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
				 data, FALSE);
}
624 
625 /**
626  * i40e_calc_nvm_checksum - Calculates and returns the checksum
627  * @hw: pointer to hardware structure
628  * @checksum: pointer to the checksum
629  *
630  * This function calculates SW Checksum that covers the whole 64kB shadow RAM
631  * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
632  * is customer specific and unknown. Therefore, this function skips all maximum
633  * possible size of VPD (1kB).
634  **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* Scratch buffer holds one SR sector at a time */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area (needed to skip it below) */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
					&vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module (also skipped below) */
	ret_code = __i40e_read_nvm_word(hw,
					I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (a fresh sector whenever i crosses a
		 * sector boundary)
		 */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word (it must not checksum itself) */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* The stored checksum is defined so that the covered words plus the
	 * checksum itself sum to I40E_SR_SW_CHECKSUM_BASE
	 */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
710 
711 /**
712  * i40e_update_nvm_checksum - Updates the NVM checksum
713  * @hw: pointer to hardware structure
714  *
715  * NVM ownership must be acquired before calling this function and released
716  * on ARQ completion event reception by caller.
717  * This function will commit SR to NVM.
718  **/
i40e_update_nvm_checksum(struct i40e_hw * hw)719 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
720 {
721 	enum i40e_status_code ret_code = I40E_SUCCESS;
722 	u16 checksum;
723 	__le16 le_sum;
724 
725 	DEBUGFUNC("i40e_update_nvm_checksum");
726 
727 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
728 	le_sum = CPU_TO_LE16(checksum);
729 	if (ret_code == I40E_SUCCESS)
730 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
731 					     1, &le_sum, TRUE);
732 
733 	return ret_code;
734 }
735 
736 /**
737  * i40e_validate_nvm_checksum - Validate EEPROM checksum
738  * @hw: pointer to hardware structure
739  * @checksum: calculated checksum
740  *
741  * Performs checksum calculation and validates the NVM SW checksum. If the
742  * caller does not need checksum, the value can be NULL.
743  **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	/* On AQ-access parts the calculation reads the SR via the admin
	 * queue, which requires NVM ownership for its whole duration.
	 */
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (!ret_code) {
		ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
		if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
			i40e_release_nvm(hw);
		if (ret_code != I40E_SUCCESS)
			goto i40e_validate_nvm_checksum_exit;
	} else {
		goto i40e_validate_nvm_checksum_exit;
	}

	/* Propagate a read failure instead of silently comparing against
	 * the zero-initialized checksum_sr, which could report a spurious
	 * mismatch (or a false match when the computed checksum is 0).
	 */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD,
				      &checksum_sr);
	if (ret_code != I40E_SUCCESS)
		goto i40e_validate_nvm_checksum_exit;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}
780 
781 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
782 						    struct i40e_nvm_access *cmd,
783 						    u8 *bytes, int *perrno);
784 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
785 						    struct i40e_nvm_access *cmd,
786 						    u8 *bytes, int *perrno);
787 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
788 						    struct i40e_nvm_access *cmd,
789 						    u8 *bytes, int *perrno);
790 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
791 						    struct i40e_nvm_access *cmd,
792 						    int *perrno);
793 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
794 						   struct i40e_nvm_access *cmd,
795 						   int *perrno);
796 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
797 						   struct i40e_nvm_access *cmd,
798 						   u8 *bytes, int *perrno);
799 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
800 						  struct i40e_nvm_access *cmd,
801 						  u8 *bytes, int *perrno);
802 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
803 						 struct i40e_nvm_access *cmd,
804 						 u8 *bytes, int *perrno);
805 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
806 						    struct i40e_nvm_access *cmd,
807 						    u8 *bytes, int *perrno);
i40e_nvmupd_get_module(u32 val)808 static INLINE u8 i40e_nvmupd_get_module(u32 val)
809 {
810 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
811 }
i40e_nvmupd_get_transaction(u32 val)812 static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
813 {
814 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
815 }
816 
/* Human-readable command names for debug output; indexed by
 * enum i40e_nvmupd_cmd (see i40e_nvmupd_command), so the order of the
 * entries must match the enum definition exactly.
 */
static const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};
835 
836 /**
837  * i40e_nvmupd_command - Process an NVM update command
838  * @hw: pointer to hardware structure
839  * @cmd: pointer to nvm update command
840  * @bytes: pointer to the data buffer
841  * @perrno: pointer to return error code
842  *
843  * Dispatches command depending on what update state is current
844  **/
i40e_nvmupd_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)845 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
846 					  struct i40e_nvm_access *cmd,
847 					  u8 *bytes, int *perrno)
848 {
849 	enum i40e_status_code status;
850 	enum i40e_nvmupd_cmd upd_cmd;
851 
852 	DEBUGFUNC("i40e_nvmupd_command");
853 
854 	/* assume success */
855 	*perrno = 0;
856 
857 	/* early check for status command and debug msgs */
858 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
859 
860 	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
861 		   i40e_nvm_update_state_str[upd_cmd],
862 		   hw->nvmupd_state,
863 		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
864 		   cmd->command, cmd->config, cmd->offset, cmd->data_size);
865 
866 	if (upd_cmd == I40E_NVMUPD_INVALID) {
867 		*perrno = -EFAULT;
868 		i40e_debug(hw, I40E_DEBUG_NVM,
869 			   "i40e_nvmupd_validate_command returns %d errno %d\n",
870 			   upd_cmd, *perrno);
871 	}
872 
873 	/* a status request returns immediately rather than
874 	 * going into the state machine
875 	 */
876 	if (upd_cmd == I40E_NVMUPD_STATUS) {
877 		if (!cmd->data_size) {
878 			*perrno = -EFAULT;
879 			return I40E_ERR_BUF_TOO_SHORT;
880 		}
881 
882 		bytes[0] = hw->nvmupd_state;
883 
884 		if (cmd->data_size >= 4) {
885 			bytes[1] = 0;
886 			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
887 		}
888 
889 		/* Clear error status on read */
890 		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
891 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
892 
893 		return I40E_SUCCESS;
894 	}
895 
896 	/* Clear status even it is not read and log */
897 	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
898 		i40e_debug(hw, I40E_DEBUG_NVM,
899 			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
900 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
901 	}
902 
903 	switch (hw->nvmupd_state) {
904 	case I40E_NVMUPD_STATE_INIT:
905 		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
906 		break;
907 
908 	case I40E_NVMUPD_STATE_READING:
909 		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
910 		break;
911 
912 	case I40E_NVMUPD_STATE_WRITING:
913 		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
914 		break;
915 
916 	case I40E_NVMUPD_STATE_INIT_WAIT:
917 	case I40E_NVMUPD_STATE_WRITE_WAIT:
918 		/* if we need to stop waiting for an event, clear
919 		 * the wait info and return before doing anything else
920 		 */
921 		if (cmd->offset == 0xffff) {
922 			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
923 			return I40E_SUCCESS;
924 		}
925 
926 		status = I40E_ERR_NOT_READY;
927 		*perrno = -EBUSY;
928 		break;
929 
930 	default:
931 		/* invalid state, should never happen */
932 		i40e_debug(hw, I40E_DEBUG_NVM,
933 			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
934 		status = I40E_NOT_SUPPORTED;
935 		*perrno = -ESRCH;
936 		break;
937 	}
938 	return status;
939 }
940 
941 /**
942  * i40e_nvmupd_state_init - Handle NVM update state Init
943  * @hw: pointer to hardware structure
944  * @cmd: pointer to nvm update command buffer
945  * @bytes: pointer to the data buffer
946  * @perrno: pointer to return error code
947  *
948  * Process legitimate commands of the Init state and conditionally set next
949  * state. Reject all other commands.
950  **/
i40e_nvmupd_state_init(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)951 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
952 						    struct i40e_nvm_access *cmd,
953 						    u8 *bytes, int *perrno)
954 {
955 	enum i40e_status_code status = I40E_SUCCESS;
956 	enum i40e_nvmupd_cmd upd_cmd;
957 
958 	DEBUGFUNC("i40e_nvmupd_state_init");
959 
960 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
961 
962 	switch (upd_cmd) {
963 	case I40E_NVMUPD_READ_SA:
964 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
965 		if (status) {
966 			*perrno = i40e_aq_rc_to_posix(status,
967 						     hw->aq.asq_last_status);
968 		} else {
969 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
970 			i40e_release_nvm(hw);
971 		}
972 		break;
973 
974 	case I40E_NVMUPD_READ_SNT:
975 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
976 		if (status) {
977 			*perrno = i40e_aq_rc_to_posix(status,
978 						     hw->aq.asq_last_status);
979 		} else {
980 			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
981 			if (status)
982 				i40e_release_nvm(hw);
983 			else
984 				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
985 		}
986 		break;
987 
988 	case I40E_NVMUPD_WRITE_ERA:
989 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
990 		if (status) {
991 			*perrno = i40e_aq_rc_to_posix(status,
992 						     hw->aq.asq_last_status);
993 		} else {
994 			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
995 			if (status) {
996 				i40e_release_nvm(hw);
997 			} else {
998 				hw->nvm_release_on_done = TRUE;
999 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
1000 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1001 			}
1002 		}
1003 		break;
1004 
1005 	case I40E_NVMUPD_WRITE_SA:
1006 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1007 		if (status) {
1008 			*perrno = i40e_aq_rc_to_posix(status,
1009 						     hw->aq.asq_last_status);
1010 		} else {
1011 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1012 			if (status) {
1013 				i40e_release_nvm(hw);
1014 			} else {
1015 				hw->nvm_release_on_done = TRUE;
1016 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1017 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1018 			}
1019 		}
1020 		break;
1021 
1022 	case I40E_NVMUPD_WRITE_SNT:
1023 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1024 		if (status) {
1025 			*perrno = i40e_aq_rc_to_posix(status,
1026 						     hw->aq.asq_last_status);
1027 		} else {
1028 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1029 			if (status) {
1030 				i40e_release_nvm(hw);
1031 			} else {
1032 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1033 				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1034 			}
1035 		}
1036 		break;
1037 
1038 	case I40E_NVMUPD_CSUM_SA:
1039 		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1040 		if (status) {
1041 			*perrno = i40e_aq_rc_to_posix(status,
1042 						     hw->aq.asq_last_status);
1043 		} else {
1044 			status = i40e_update_nvm_checksum(hw);
1045 			if (status) {
1046 				*perrno = hw->aq.asq_last_status ?
1047 				   i40e_aq_rc_to_posix(status,
1048 						       hw->aq.asq_last_status) :
1049 				   -EIO;
1050 				i40e_release_nvm(hw);
1051 			} else {
1052 				hw->nvm_release_on_done = TRUE;
1053 				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1054 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1055 			}
1056 		}
1057 		break;
1058 
1059 	case I40E_NVMUPD_EXEC_AQ:
1060 		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1061 		break;
1062 
1063 	case I40E_NVMUPD_GET_AQ_RESULT:
1064 		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1065 		break;
1066 
1067 	default:
1068 		i40e_debug(hw, I40E_DEBUG_NVM,
1069 			   "NVMUPD: bad cmd %s in init state\n",
1070 			   i40e_nvm_update_state_str[upd_cmd]);
1071 		status = I40E_ERR_NVM;
1072 		*perrno = -ESRCH;
1073 		break;
1074 	}
1075 	return status;
1076 }
1077 
1078 /**
1079  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1080  * @hw: pointer to hardware structure
1081  * @cmd: pointer to nvm update command buffer
1082  * @bytes: pointer to the data buffer
1083  * @perrno: pointer to return error code
1084  *
1085  * NVM ownership is already held.  Process legitimate commands and set any
1086  * change in state; reject all other commands.
1087  **/
i40e_nvmupd_state_reading(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1088 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1089 						    struct i40e_nvm_access *cmd,
1090 						    u8 *bytes, int *perrno)
1091 {
1092 	enum i40e_status_code status = I40E_SUCCESS;
1093 	enum i40e_nvmupd_cmd upd_cmd;
1094 
1095 	DEBUGFUNC("i40e_nvmupd_state_reading");
1096 
1097 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1098 
1099 	switch (upd_cmd) {
1100 	case I40E_NVMUPD_READ_SA:
1101 	case I40E_NVMUPD_READ_CON:
1102 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1103 		break;
1104 
1105 	case I40E_NVMUPD_READ_LCB:
1106 		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1107 		i40e_release_nvm(hw);
1108 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1109 		break;
1110 
1111 	default:
1112 		i40e_debug(hw, I40E_DEBUG_NVM,
1113 			   "NVMUPD: bad cmd %s in reading state.\n",
1114 			   i40e_nvm_update_state_str[upd_cmd]);
1115 		status = I40E_NOT_SUPPORTED;
1116 		*perrno = -ESRCH;
1117 		break;
1118 	}
1119 	return status;
1120 }
1121 
1122 /**
1123  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1124  * @hw: pointer to hardware structure
1125  * @cmd: pointer to nvm update command buffer
1126  * @bytes: pointer to the data buffer
1127  * @perrno: pointer to return error code
1128  *
1129  * NVM ownership is already held.  Process legitimate commands and set any
1130  * change in state; reject all other commands
1131  **/
i40e_nvmupd_state_writing(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1132 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1133 						    struct i40e_nvm_access *cmd,
1134 						    u8 *bytes, int *perrno)
1135 {
1136 	enum i40e_status_code status = I40E_SUCCESS;
1137 	enum i40e_nvmupd_cmd upd_cmd;
1138 	bool retry_attempt = FALSE;
1139 
1140 	DEBUGFUNC("i40e_nvmupd_state_writing");
1141 
1142 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1143 
1144 retry:
1145 	switch (upd_cmd) {
1146 	case I40E_NVMUPD_WRITE_CON:
1147 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1148 		if (!status) {
1149 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1150 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1151 		}
1152 		break;
1153 
1154 	case I40E_NVMUPD_WRITE_LCB:
1155 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1156 		if (status) {
1157 			*perrno = hw->aq.asq_last_status ?
1158 				   i40e_aq_rc_to_posix(status,
1159 						       hw->aq.asq_last_status) :
1160 				   -EIO;
1161 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1162 		} else {
1163 			hw->nvm_release_on_done = TRUE;
1164 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1165 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1166 		}
1167 		break;
1168 
1169 	case I40E_NVMUPD_CSUM_CON:
1170 		/* Assumes the caller has acquired the nvm */
1171 		status = i40e_update_nvm_checksum(hw);
1172 		if (status) {
1173 			*perrno = hw->aq.asq_last_status ?
1174 				   i40e_aq_rc_to_posix(status,
1175 						       hw->aq.asq_last_status) :
1176 				   -EIO;
1177 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1178 		} else {
1179 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1180 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1181 		}
1182 		break;
1183 
1184 	case I40E_NVMUPD_CSUM_LCB:
1185 		/* Assumes the caller has acquired the nvm */
1186 		status = i40e_update_nvm_checksum(hw);
1187 		if (status) {
1188 			*perrno = hw->aq.asq_last_status ?
1189 				   i40e_aq_rc_to_posix(status,
1190 						       hw->aq.asq_last_status) :
1191 				   -EIO;
1192 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1193 		} else {
1194 			hw->nvm_release_on_done = TRUE;
1195 			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1196 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1197 		}
1198 		break;
1199 
1200 	default:
1201 		i40e_debug(hw, I40E_DEBUG_NVM,
1202 			   "NVMUPD: bad cmd %s in writing state.\n",
1203 			   i40e_nvm_update_state_str[upd_cmd]);
1204 		status = I40E_NOT_SUPPORTED;
1205 		*perrno = -ESRCH;
1206 		break;
1207 	}
1208 
1209 	/* In some circumstances, a multi-write transaction takes longer
1210 	 * than the default 3 minute timeout on the write semaphore.  If
1211 	 * the write failed with an EBUSY status, this is likely the problem,
1212 	 * so here we try to reacquire the semaphore then retry the write.
1213 	 * We only do one retry, then give up.
1214 	 */
1215 	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1216 	    !retry_attempt) {
1217 		enum i40e_status_code old_status = status;
1218 		u32 old_asq_status = hw->aq.asq_last_status;
1219 		u32 gtime;
1220 
1221 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1222 		if (gtime >= hw->nvm.hw_semaphore_timeout) {
1223 			i40e_debug(hw, I40E_DEBUG_ALL,
1224 				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1225 				   gtime, hw->nvm.hw_semaphore_timeout);
1226 			i40e_release_nvm(hw);
1227 			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1228 			if (status) {
1229 				i40e_debug(hw, I40E_DEBUG_ALL,
1230 					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1231 					   hw->aq.asq_last_status);
1232 				status = old_status;
1233 				hw->aq.asq_last_status = old_asq_status;
1234 			} else {
1235 				retry_attempt = TRUE;
1236 				goto retry;
1237 			}
1238 		}
1239 	}
1240 
1241 	return status;
1242 }
1243 
1244 /**
1245  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1246  * @hw: pointer to the hardware structure
1247  * @opcode: the event that just happened
1248  **/
i40e_nvmupd_check_wait_event(struct i40e_hw * hw,u16 opcode)1249 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1250 {
1251 	if (opcode == hw->nvm_wait_opcode) {
1252 
1253 		i40e_debug(hw, I40E_DEBUG_NVM,
1254 			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1255 		if (hw->nvm_release_on_done) {
1256 			i40e_release_nvm(hw);
1257 			hw->nvm_release_on_done = FALSE;
1258 		}
1259 		hw->nvm_wait_opcode = 0;
1260 
1261 		if (hw->aq.arq_last_status) {
1262 			hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1263 			return;
1264 		}
1265 
1266 		switch (hw->nvmupd_state) {
1267 		case I40E_NVMUPD_STATE_INIT_WAIT:
1268 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1269 			break;
1270 
1271 		case I40E_NVMUPD_STATE_WRITE_WAIT:
1272 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1273 			break;
1274 
1275 		default:
1276 			break;
1277 		}
1278 	}
1279 }
1280 
1281 /**
1282  * i40e_nvmupd_validate_command - Validate given command
1283  * @hw: pointer to hardware structure
1284  * @cmd: pointer to nvm update command buffer
1285  * @perrno: pointer to return error code
1286  *
1287  * Return one of the valid command types or I40E_NVMUPD_INVALID
1288  **/
i40e_nvmupd_validate_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1289 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1290 						    struct i40e_nvm_access *cmd,
1291 						    int *perrno)
1292 {
1293 	enum i40e_nvmupd_cmd upd_cmd;
1294 	u8 module, transaction;
1295 
1296 	DEBUGFUNC("i40e_nvmupd_validate_command\n");
1297 
1298 	/* anything that doesn't match a recognized case is an error */
1299 	upd_cmd = I40E_NVMUPD_INVALID;
1300 
1301 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1302 	module = i40e_nvmupd_get_module(cmd->config);
1303 
1304 	/* limits on data size */
1305 	if ((cmd->data_size < 1) ||
1306 	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1307 		i40e_debug(hw, I40E_DEBUG_NVM,
1308 			   "i40e_nvmupd_validate_command data_size %d\n",
1309 			   cmd->data_size);
1310 		*perrno = -EFAULT;
1311 		return I40E_NVMUPD_INVALID;
1312 	}
1313 
1314 	switch (cmd->command) {
1315 	case I40E_NVM_READ:
1316 		switch (transaction) {
1317 		case I40E_NVM_CON:
1318 			upd_cmd = I40E_NVMUPD_READ_CON;
1319 			break;
1320 		case I40E_NVM_SNT:
1321 			upd_cmd = I40E_NVMUPD_READ_SNT;
1322 			break;
1323 		case I40E_NVM_LCB:
1324 			upd_cmd = I40E_NVMUPD_READ_LCB;
1325 			break;
1326 		case I40E_NVM_SA:
1327 			upd_cmd = I40E_NVMUPD_READ_SA;
1328 			break;
1329 		case I40E_NVM_EXEC:
1330 			if (module == 0xf)
1331 				upd_cmd = I40E_NVMUPD_STATUS;
1332 			else if (module == 0)
1333 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1334 			break;
1335 		}
1336 		break;
1337 
1338 	case I40E_NVM_WRITE:
1339 		switch (transaction) {
1340 		case I40E_NVM_CON:
1341 			upd_cmd = I40E_NVMUPD_WRITE_CON;
1342 			break;
1343 		case I40E_NVM_SNT:
1344 			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1345 			break;
1346 		case I40E_NVM_LCB:
1347 			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1348 			break;
1349 		case I40E_NVM_SA:
1350 			upd_cmd = I40E_NVMUPD_WRITE_SA;
1351 			break;
1352 		case I40E_NVM_ERA:
1353 			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1354 			break;
1355 		case I40E_NVM_CSUM:
1356 			upd_cmd = I40E_NVMUPD_CSUM_CON;
1357 			break;
1358 		case (I40E_NVM_CSUM|I40E_NVM_SA):
1359 			upd_cmd = I40E_NVMUPD_CSUM_SA;
1360 			break;
1361 		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1362 			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1363 			break;
1364 		case I40E_NVM_EXEC:
1365 			if (module == 0)
1366 				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1367 			break;
1368 		}
1369 		break;
1370 	}
1371 
1372 	return upd_cmd;
1373 }
1374 
1375 /**
1376  * i40e_nvmupd_exec_aq - Run an AQ command
1377  * @hw: pointer to hardware structure
1378  * @cmd: pointer to nvm update command buffer
1379  * @bytes: pointer to the data buffer
1380  * @perrno: pointer to return error code
1381  *
1382  * cmd structure contains identifiers and data buffer
1383  **/
i40e_nvmupd_exec_aq(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1384 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1385 						 struct i40e_nvm_access *cmd,
1386 						 u8 *bytes, int *perrno)
1387 {
1388 	struct i40e_asq_cmd_details cmd_details;
1389 	enum i40e_status_code status;
1390 	struct i40e_aq_desc *aq_desc;
1391 	u32 buff_size = 0;
1392 	u8 *buff = NULL;
1393 	u32 aq_desc_len;
1394 	u32 aq_data_len;
1395 
1396 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1397 	memset(&cmd_details, 0, sizeof(cmd_details));
1398 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1399 
1400 	aq_desc_len = sizeof(struct i40e_aq_desc);
1401 	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1402 
1403 	/* get the aq descriptor */
1404 	if (cmd->data_size < aq_desc_len) {
1405 		i40e_debug(hw, I40E_DEBUG_NVM,
1406 			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1407 			   cmd->data_size, aq_desc_len);
1408 		*perrno = -EINVAL;
1409 		return I40E_ERR_PARAM;
1410 	}
1411 	aq_desc = (struct i40e_aq_desc *)bytes;
1412 
1413 	/* if data buffer needed, make sure it's ready */
1414 	aq_data_len = cmd->data_size - aq_desc_len;
1415 	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1416 	if (buff_size) {
1417 		if (!hw->nvm_buff.va) {
1418 			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1419 							hw->aq.asq_buf_size);
1420 			if (status)
1421 				i40e_debug(hw, I40E_DEBUG_NVM,
1422 					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1423 					   status);
1424 		}
1425 
1426 		if (hw->nvm_buff.va) {
1427 			buff = hw->nvm_buff.va;
1428 			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1429 				I40E_NONDMA_TO_NONDMA);
1430 		}
1431 	}
1432 
1433 	/* and away we go! */
1434 	status = i40e_asq_send_command(hw, aq_desc, buff,
1435 				       buff_size, &cmd_details);
1436 	if (status) {
1437 		i40e_debug(hw, I40E_DEBUG_NVM,
1438 			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1439 			   i40e_stat_str(hw, status),
1440 			   i40e_aq_str(hw, hw->aq.asq_last_status));
1441 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1442 	}
1443 
1444 	/* should we wait for a followup event? */
1445 	if (cmd->offset) {
1446 		hw->nvm_wait_opcode = cmd->offset;
1447 		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1448 	}
1449 
1450 	return status;
1451 }
1452 
1453 /**
1454  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1455  * @hw: pointer to hardware structure
1456  * @cmd: pointer to nvm update command buffer
1457  * @bytes: pointer to the data buffer
1458  * @perrno: pointer to return error code
1459  *
1460  * cmd structure contains identifiers and data buffer
1461  **/
i40e_nvmupd_get_aq_result(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1462 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1463 						    struct i40e_nvm_access *cmd,
1464 						    u8 *bytes, int *perrno)
1465 {
1466 	u32 aq_total_len;
1467 	u32 aq_desc_len;
1468 	int remainder;
1469 	u8 *buff;
1470 
1471 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1472 
1473 	aq_desc_len = sizeof(struct i40e_aq_desc);
1474 	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1475 
1476 	/* check offset range */
1477 	if (cmd->offset > aq_total_len) {
1478 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1479 			   __func__, cmd->offset, aq_total_len);
1480 		*perrno = -EINVAL;
1481 		return I40E_ERR_PARAM;
1482 	}
1483 
1484 	/* check copylength range */
1485 	if (cmd->data_size > (aq_total_len - cmd->offset)) {
1486 		int new_len = aq_total_len - cmd->offset;
1487 
1488 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1489 			   __func__, cmd->data_size, new_len);
1490 		cmd->data_size = new_len;
1491 	}
1492 
1493 	remainder = cmd->data_size;
1494 	if (cmd->offset < aq_desc_len) {
1495 		u32 len = aq_desc_len - cmd->offset;
1496 
1497 		len = min(len, cmd->data_size);
1498 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1499 			   __func__, cmd->offset, cmd->offset + len);
1500 
1501 		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1502 		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
1503 
1504 		bytes += len;
1505 		remainder -= len;
1506 		buff = hw->nvm_buff.va;
1507 	} else {
1508 		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1509 	}
1510 
1511 	if (remainder > 0) {
1512 		int start_byte = buff - (u8 *)hw->nvm_buff.va;
1513 
1514 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1515 			   __func__, start_byte, start_byte + remainder);
1516 		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1517 	}
1518 
1519 	return I40E_SUCCESS;
1520 }
1521 
1522 /**
1523  * i40e_nvmupd_nvm_read - Read NVM
1524  * @hw: pointer to hardware structure
1525  * @cmd: pointer to nvm update command buffer
1526  * @bytes: pointer to the data buffer
1527  * @perrno: pointer to return error code
1528  *
1529  * cmd structure contains identifiers and data buffer
1530  **/
i40e_nvmupd_nvm_read(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1531 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1532 						  struct i40e_nvm_access *cmd,
1533 						  u8 *bytes, int *perrno)
1534 {
1535 	struct i40e_asq_cmd_details cmd_details;
1536 	enum i40e_status_code status;
1537 	u8 module, transaction;
1538 	bool last;
1539 
1540 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1541 	module = i40e_nvmupd_get_module(cmd->config);
1542 	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1543 
1544 	memset(&cmd_details, 0, sizeof(cmd_details));
1545 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1546 
1547 	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1548 				  bytes, last, &cmd_details);
1549 	if (status) {
1550 		i40e_debug(hw, I40E_DEBUG_NVM,
1551 			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1552 			   module, cmd->offset, cmd->data_size);
1553 		i40e_debug(hw, I40E_DEBUG_NVM,
1554 			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1555 			   status, hw->aq.asq_last_status);
1556 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1557 	}
1558 
1559 	return status;
1560 }
1561 
1562 /**
1563  * i40e_nvmupd_nvm_erase - Erase an NVM module
1564  * @hw: pointer to hardware structure
1565  * @cmd: pointer to nvm update command buffer
1566  * @perrno: pointer to return error code
1567  *
1568  * module, offset, data_size and data are in cmd structure
1569  **/
i40e_nvmupd_nvm_erase(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1570 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1571 						   struct i40e_nvm_access *cmd,
1572 						   int *perrno)
1573 {
1574 	enum i40e_status_code status = I40E_SUCCESS;
1575 	struct i40e_asq_cmd_details cmd_details;
1576 	u8 module, transaction;
1577 	bool last;
1578 
1579 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1580 	module = i40e_nvmupd_get_module(cmd->config);
1581 	last = (transaction & I40E_NVM_LCB);
1582 
1583 	memset(&cmd_details, 0, sizeof(cmd_details));
1584 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1585 
1586 	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1587 				   last, &cmd_details);
1588 	if (status) {
1589 		i40e_debug(hw, I40E_DEBUG_NVM,
1590 			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1591 			   module, cmd->offset, cmd->data_size);
1592 		i40e_debug(hw, I40E_DEBUG_NVM,
1593 			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1594 			   status, hw->aq.asq_last_status);
1595 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1596 	}
1597 
1598 	return status;
1599 }
1600 
1601 /**
1602  * i40e_nvmupd_nvm_write - Write NVM
1603  * @hw: pointer to hardware structure
1604  * @cmd: pointer to nvm update command buffer
1605  * @bytes: pointer to the data buffer
1606  * @perrno: pointer to return error code
1607  *
1608  * module, offset, data_size and data are in cmd structure
1609  **/
i40e_nvmupd_nvm_write(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1610 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1611 						   struct i40e_nvm_access *cmd,
1612 						   u8 *bytes, int *perrno)
1613 {
1614 	enum i40e_status_code status = I40E_SUCCESS;
1615 	struct i40e_asq_cmd_details cmd_details;
1616 	u8 module, transaction;
1617 	bool last;
1618 
1619 	transaction = i40e_nvmupd_get_transaction(cmd->config);
1620 	module = i40e_nvmupd_get_module(cmd->config);
1621 	last = (transaction & I40E_NVM_LCB);
1622 
1623 	memset(&cmd_details, 0, sizeof(cmd_details));
1624 	cmd_details.wb_desc = &hw->nvm_wb_desc;
1625 
1626 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1627 				    (u16)cmd->data_size, bytes, last,
1628 				    &cmd_details);
1629 	if (status) {
1630 		i40e_debug(hw, I40E_DEBUG_NVM,
1631 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1632 			   module, cmd->offset, cmd->data_size);
1633 		i40e_debug(hw, I40E_DEBUG_NVM,
1634 			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1635 			   status, hw->aq.asq_last_status);
1636 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1637 	}
1638 
1639 	return status;
1640 }
1641