1 /******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34 #include "i40e_prototype.h"
35
36 /**
37 * i40e_init_nvm - Initialize NVM function pointers
38 * @hw: pointer to the HW structure
39 *
40 * Setup the function pointers and the NVM info structure. Should be called
41 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
42 * Please notice that the NVM term is used here (& in all methods covered
43 * in this file) as an equivalent of the FLASH part mapped into the SR.
44 * We are accessing FLASH always through the Shadow RAM.
45 **/
i40e_init_nvm(struct i40e_hw * hw)46 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
47 {
48 struct i40e_nvm_info *nvm = &hw->nvm;
49 enum i40e_status_code ret_code = I40E_SUCCESS;
50 u32 fla, gens;
51 u8 sr_size;
52
53 DEBUGFUNC("i40e_init_nvm");
54
55 /* The SR size is stored regardless of the nvm programming mode
56 * as the blank mode may be used in the factory line.
57 */
58 gens = rd32(hw, I40E_GLNVM_GENS);
59 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
60 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
61 /* Switching to words (sr_size contains power of 2KB) */
62 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
63
64 /* Check if we are in the normal or blank NVM programming mode */
65 fla = rd32(hw, I40E_GLNVM_FLA);
66 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
67 /* Max NVM timeout */
68 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
69 nvm->blank_nvm_mode = FALSE;
70 } else { /* Blank programming mode */
71 nvm->blank_nvm_mode = TRUE;
72 ret_code = I40E_ERR_NVM_BLANK_MODE;
73 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
74 }
75
76 return ret_code;
77 }
78
79 /**
80 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
81 * @hw: pointer to the HW structure
82 * @access: NVM access type (read or write)
83 *
84 * This function will request NVM ownership for reading
85 * via the proper Admin Command.
86 **/
i40e_acquire_nvm(struct i40e_hw * hw,enum i40e_aq_resource_access_type access)87 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
88 enum i40e_aq_resource_access_type access)
89 {
90 enum i40e_status_code ret_code = I40E_SUCCESS;
91 u64 gtime, timeout;
92 u64 time_left = 0;
93
94 DEBUGFUNC("i40e_acquire_nvm");
95
96 if (hw->nvm.blank_nvm_mode)
97 goto i40e_i40e_acquire_nvm_exit;
98
99 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
100 0, &time_left, NULL);
101 /* Reading the Global Device Timer */
102 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
103
104 /* Store the timeout */
105 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
106
107 if (ret_code)
108 i40e_debug(hw, I40E_DEBUG_NVM,
109 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
110 access, (unsigned long long)time_left, ret_code,
111 hw->aq.asq_last_status);
112
113 if (ret_code && time_left) {
114 /* Poll until the current NVM owner timeouts */
115 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
116 while ((gtime < timeout) && time_left) {
117 i40e_msec_delay(10);
118 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
119 ret_code = i40e_aq_request_resource(hw,
120 I40E_NVM_RESOURCE_ID,
121 access, 0, &time_left,
122 NULL);
123 if (ret_code == I40E_SUCCESS) {
124 hw->nvm.hw_semaphore_timeout =
125 I40E_MS_TO_GTIME(time_left) + gtime;
126 break;
127 }
128 }
129 if (ret_code != I40E_SUCCESS) {
130 hw->nvm.hw_semaphore_timeout = 0;
131 i40e_debug(hw, I40E_DEBUG_NVM,
132 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
133 (unsigned long long)time_left, ret_code,
134 hw->aq.asq_last_status);
135 }
136 }
137
138 i40e_i40e_acquire_nvm_exit:
139 return ret_code;
140 }
141
142 /**
143 * i40e_release_nvm - Generic request for releasing the NVM ownership
144 * @hw: pointer to the HW structure
145 *
146 * This function will release NVM resource via the proper Admin Command.
147 **/
i40e_release_nvm(struct i40e_hw * hw)148 void i40e_release_nvm(struct i40e_hw *hw)
149 {
150 enum i40e_status_code ret_code = I40E_SUCCESS;
151 u32 total_delay = 0;
152
153 DEBUGFUNC("i40e_release_nvm");
154
155 if (hw->nvm.blank_nvm_mode)
156 return;
157
158 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
159
160 /* there are some rare cases when trying to release the resource
161 * results in an admin Q timeout, so handle them correctly
162 */
163 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
164 (total_delay < hw->aq.asq_cmd_timeout)) {
165 i40e_msec_delay(1);
166 ret_code = i40e_aq_release_resource(hw,
167 I40E_NVM_RESOURCE_ID, 0, NULL);
168 total_delay++;
169 }
170 }
171
172 /**
173 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
174 * @hw: pointer to the HW structure
175 *
176 * Polls the SRCTL Shadow RAM register done bit.
177 **/
i40e_poll_sr_srctl_done_bit(struct i40e_hw * hw)178 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
179 {
180 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
181 u32 srctl, wait_cnt;
182
183 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
184
185 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
186 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
187 srctl = rd32(hw, I40E_GLNVM_SRCTL);
188 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
189 ret_code = I40E_SUCCESS;
190 break;
191 }
192 i40e_usec_delay(5);
193 }
194 if (ret_code == I40E_ERR_TIMEOUT)
195 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
196 return ret_code;
197 }
198
199 /**
200 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
201 * @hw: pointer to the HW structure
202 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
203 * @data: word read from the Shadow RAM
204 *
205 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
206 **/
/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data)
{
	enum i40e_status_code status;
	u32 reg_val;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		return I40E_ERR_PARAM;
	}

	/* The interface must be idle before a new request is issued */
	status = i40e_poll_sr_srctl_done_bit(hw);
	if (status == I40E_SUCCESS) {
		/* Program the word address and kick off the read */
		reg_val = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			  BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, reg_val);

		/* Wait for completion, then latch the data word */
		status = i40e_poll_sr_srctl_done_bit(hw);
		if (status == I40E_SUCCESS) {
			reg_val = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((reg_val &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
			return I40E_SUCCESS;
		}
	}

	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
		   offset);
	return status;
}
248
249 /**
250 * i40e_read_nvm_aq - Read Shadow RAM.
251 * @hw: pointer to the HW structure.
252 * @module_pointer: module pointer location in words from the NVM beginning
253 * @offset: offset in words from module start
254 * @words: number of words to write
255 * @data: buffer with words to write to the Shadow RAM
256 * @last_command: tells the AdminQ that this is the last command
257 *
258 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
259 **/
/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: destination buffer for the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 * (Messages below previously said "write" - copy-paste from the write
 * path - corrected to "read".)
 **/
static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
					      u8 module_pointer, u32 offset,
					      u16 words, void *data,
					      bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_read_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector), in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset, /*bytes*/
					    2 * words,  /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}
301
302 /**
303 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
304 * @hw: pointer to the HW structure
305 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
306 * @data: word read from the Shadow RAM
307 *
308 * Reads one 16 bit word from the Shadow RAM using the AdminQ
309 **/
/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 **/
static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
						   u16 *data)
{
	enum i40e_status_code ret_code;

	DEBUGFUNC("i40e_read_nvm_word_aq");

	/* Module pointer 0x0 treats the SR as flat memory */
	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
	/* Only convert on success; otherwise we would byte-swap whatever
	 * stale data the caller's buffer happened to contain.
	 */
	if (ret_code == I40E_SUCCESS)
		*data = LE16_TO_CPU(*(__le16 *)data);

	return ret_code;
}
322
323 /**
324 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
325 * @hw: pointer to the HW structure
326 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
327 * @data: word read from the Shadow RAM
328 *
329 * Reads one 16 bit word from the Shadow RAM.
330 *
331 * Do not use this function except in cases where the nvm lock is already
332 * taken via i40e_acquire_nvm().
333 **/
/**
 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, via the AdminQ when the
 * HW advertises that capability, otherwise via the SRCTL register.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
					   u16 offset,
					   u16 *data)
{
	bool use_aq = !!(hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE);

	return use_aq ? i40e_read_nvm_word_aq(hw, offset, data)
		      : i40e_read_nvm_word_srctl(hw, offset, data);
}
344
345 /**
346 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
347 * @hw: pointer to the HW structure
348 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
349 * @data: word read from the Shadow RAM
350 *
351 * Reads one 16 bit word from the Shadow RAM.
352 **/
/**
 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, taking and releasing the
 * NVM lock around the access when the HW requires it.
 **/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	bool need_lock = !!(hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK);
	enum i40e_status_code status;

	if (need_lock) {
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status)
			return status;
	}

	status = __i40e_read_nvm_word(hw, offset, data);

	if (need_lock)
		i40e_release_nvm(hw);

	return status;
}
369
370 /**
371 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
372 * @hw: Pointer to the HW structure
373 * @module_ptr: Pointer to module in words with respect to NVM beginning
374 * @module_offset: Offset in words from module start
375 * @data_offset: Offset in words from reading data area start
376 * @words_data_size: Words to read from NVM
377 * @data_ptr: Pointer to memory location where resulting buffer will be stored
378 **/
379 enum i40e_status_code
i40e_read_nvm_module_data(struct i40e_hw * hw,u8 module_ptr,u16 module_offset,u16 data_offset,u16 words_data_size,u16 * data_ptr)380 i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
381 u16 data_offset, u16 words_data_size, u16 *data_ptr)
382 {
383 enum i40e_status_code status;
384 u16 specific_ptr = 0;
385 u16 ptr_value = 0;
386 u16 offset = 0;
387
388 if (module_ptr != 0) {
389 status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
390 if (status != I40E_SUCCESS) {
391 i40e_debug(hw, I40E_DEBUG_ALL,
392 "Reading nvm word failed.Error code: %d.\n",
393 status);
394 return I40E_ERR_NVM;
395 }
396 }
397 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
398 #define I40E_NVM_INVALID_VAL 0xFFFF
399
400 /* Pointer not initialized */
401 if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
402 ptr_value == I40E_NVM_INVALID_VAL) {
403 i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
404 return I40E_ERR_BAD_PTR;
405 }
406
407 /* Check whether the module is in SR mapped area or outside */
408 if (ptr_value & I40E_PTR_TYPE) {
409 /* Pointer points outside of the Shared RAM mapped area */
410 i40e_debug(hw, I40E_DEBUG_ALL,
411 "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
412
413 return I40E_ERR_PARAM;
414 } else {
415 /* Read from the Shadow RAM */
416
417 status = i40e_read_nvm_word(hw, ptr_value + module_offset,
418 &specific_ptr);
419 if (status != I40E_SUCCESS) {
420 i40e_debug(hw, I40E_DEBUG_ALL,
421 "Reading nvm word failed.Error code: %d.\n",
422 status);
423 return I40E_ERR_NVM;
424 }
425
426 offset = ptr_value + module_offset + specific_ptr +
427 data_offset;
428
429 status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
430 data_ptr);
431 if (status != I40E_SUCCESS) {
432 i40e_debug(hw, I40E_DEBUG_ALL,
433 "Reading nvm buffer failed.Error code: %d.\n",
434 status);
435 }
436 }
437
438 return status;
439 }
440
441 /**
442 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
443 * @hw: pointer to the HW structure
444 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
445 * @words: (in) number of words to read; (out) number of words actually read
446 * @data: words read from the Shadow RAM
447 *
448 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
449 * method. The buffer read is preceded by the NVM ownership take
450 * and followed by the release.
451 **/
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time via
 * the GLNVM_SRCTL register. Stops at the first failed word; *words is
 * updated with the count actually read. The caller is responsible for
 * any NVM ownership/locking required.
 **/
static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
							u16 *words, u16 *data)
{
	enum i40e_status_code status = I40E_SUCCESS;
	u16 idx = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_srctl");

	/* Walk the selected region word by word */
	while (idx < *words) {
		status = i40e_read_nvm_word_srctl(hw, offset + idx,
						  &data[idx]);
		if (status != I40E_SUCCESS)
			break;
		idx++;
	}

	/* Report how many words were actually transferred */
	*words = idx;

	return status;
}
473
474 /**
475 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
476 * @hw: pointer to the HW structure
477 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
478 * @words: (in) number of words to read; (out) number of words actually read
479 * @data: words read from the Shadow RAM
480 *
481 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
482 * method. The buffer read is preceded by the NVM ownership take
483 * and followed by the release.
484 **/
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the transfer so that no single AQ read exceeds one
 * 4KB sector or crosses a sector boundary.
 * NOTE(review): unlike the original header claimed, this function does
 * NOT acquire/release NVM ownership itself - callers (e.g.
 * i40e_read_nvm_buffer) do that around it; confirm before relying on it.
 **/
static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
						     u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = FALSE;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* mid-sector start: read only up to the sector end */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* sector-aligned: read a full sector or whatever
			 * remains of the request
			 */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = TRUE;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ delivers little-endian words; convert in place to host order */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* On success this equals the requested count; on failure, the
	 * number of words successfully transferred before the error.
	 */
	*words = words_read;
	return ret_code;
}
532
533 /**
534 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
535 * @hw: pointer to the HW structure
536 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
537 * @words: (in) number of words to read; (out) number of words actually read
538 * @data: words read from the Shadow RAM
539 *
540 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
541 * method.
542 **/
/**
 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Dispatches the buffer read to the AdminQ path or the SRCTL register
 * path, depending on the HW capability flag. The caller is responsible
 * for any required NVM ownership.
 **/
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
					     u16 offset,
					     u16 *words, u16 *data)
{
	bool use_aq = !!(hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE);

	if (!use_aq)
		return i40e_read_nvm_buffer_srctl(hw, offset, words, data);

	return i40e_read_nvm_buffer_aq(hw, offset, words, data);
}
552
553 /**
554 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
555 * @hw: pointer to the HW structure
556 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
557 * @words: (in) number of words to read; (out) number of words actually read
558 * @data: words read from the Shadow RAM
559 *
560 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
561 * method. The buffer read is preceded by the NVM ownership take
562 * and followed by the release.
563 **/
/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR. When the AdminQ access
 * path is in use, the read is preceded by the NVM ownership take and
 * followed by the release; the SRCTL path needs no ownership.
 **/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	enum i40e_status_code status;

	if (!(hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE))
		return i40e_read_nvm_buffer_srctl(hw, offset, words, data);

	/* AQ path requires holding the NVM semaphore around the transfer */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	status = i40e_read_nvm_buffer_aq(hw, offset, words, data);
	i40e_release_nvm(hw);

	return status;
}
582
583 /**
584 * i40e_write_nvm_aq - Writes Shadow RAM.
585 * @hw: pointer to the HW structure.
586 * @module_pointer: module pointer location in words from the NVM beginning
587 * @offset: offset in words from module start
588 * @words: number of words to write
589 * @data: buffer with words to write to the Shadow RAM
590 * @last_command: tells the AdminQ that this is the last command
591 *
592 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
593 **/
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* The SR limit can only be validated here for the flat memory
	 * model; for the module-based model the module pointer value is
	 * unknown before the NVM resource is acquired, so firmware
	 * performs that check instead.
	 */
	if ((offset + words) > hw->nvm.sr_size) {
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
		return I40E_ERR_NVM;
	}

	/* One AQ write moves at most one 4KB sector */
	if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) {
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
		return I40E_ERR_NVM;
	}

	/* A single write must not straddle a sector boundary */
	if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
	    != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) {
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
		return I40E_ERR_NVM;
	}

	return i40e_aq_update_nvm(hw, module_pointer,
				  2 * offset, /*bytes*/
				  2 * words,  /*bytes*/
				  data, last_command, 0, &cmd_details);
}
629
630 /**
631 * __i40e_write_nvm_word - Writes Shadow RAM word
632 * @hw: pointer to the HW structure
633 * @offset: offset of the Shadow RAM word to write
634 * @data: word to write to the Shadow RAM
635 *
636 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
637 * NVM ownership have to be acquired and released (on ARQ completion event
638 * reception) by caller. To commit SR to NVM update checksum function
639 * should be called.
640 **/
/**
 * __i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM (converted to LE in place -
 *        the caller's buffer IS modified)
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership have to be acquired and released (on ARQ completion event
 * reception) by caller. To commit SR to NVM update checksum function
 * should be called.
 **/
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
					    void *data)
{
	DEBUGFUNC("i40e_write_nvm_word");

	/* Convert the word to little-endian in place; the AQ transfers
	 * raw bytes. Note this mutates the caller's buffer.
	 */
	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

	/* Value 0x00 below means that we treat SR as a flat mem */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
}
651
652 /**
653 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
654 * @hw: pointer to the HW structure
655 * @module_pointer: module pointer location in words from the NVM beginning
656 * @offset: offset of the Shadow RAM buffer to write
657 * @words: number of words to write
658 * @data: words to write to the Shadow RAM
659 *
660 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
661 * NVM ownership must be acquired before calling this function and released
662 * on ARQ completion event reception by caller. To commit SR to NVM update
663 * checksum function should be called.
664 **/
/**
 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM (converted to LE in place -
 *        the caller's buffer IS modified)
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller. To commit SR to NVM update
 * checksum function should be called.
 **/
enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
					      u8 module_pointer, u32 offset,
					      u16 words, void *data)
{
	u16 *host_words = (u16 *)data;
	__le16 *wire_words = (__le16 *)data;
	u32 idx;

	DEBUGFUNC("i40e_write_nvm_buffer");

	/* Convert the whole buffer to little-endian in place; the AQ
	 * transfers raw bytes.
	 */
	for (idx = 0; idx < words; idx++)
		wire_words[idx] = CPU_TO_LE16(host_words[idx]);

	/* Modules mirrored in the Shadow RAM are always smaller than 4K,
	 * so one AQ write is sufficient.
	 */
	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
				 data, FALSE);
}
684
685 /**
686 * i40e_calc_nvm_checksum - Calculates and returns the checksum
687 * @hw: pointer to hardware structure
688 * @checksum: pointer to the checksum
689 *
690 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
691 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
692 * is customer specific and unknown. Therefore, this function skips all maximum
693 * possible size of VPD (1kB).
694 **/
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 * NOTE(review): uses the lock-free __i40e_read_nvm_* helpers, so the
 * caller presumably must already hold the NVM resource (as
 * i40e_validate_nvm_checksum does) - confirm for new call sites.
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* One-sector (4KB) staging buffer for SR page reads */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (refill the staging buffer at each sector
		 * boundary)
		 */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Defined so that the summed words plus the stored checksum word
	 * add up to I40E_SR_SW_CHECKSUM_BASE
	 */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
768
769 /**
770 * i40e_update_nvm_checksum - Updates the NVM checksum
771 * @hw: pointer to hardware structure
772 *
773 * NVM ownership must be acquired before calling this function and released
774 * on ARQ completion event reception by caller.
775 * This function will commit SR to NVM.
776 **/
i40e_update_nvm_checksum(struct i40e_hw * hw)777 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
778 {
779 enum i40e_status_code ret_code = I40E_SUCCESS;
780 u16 checksum;
781 __le16 le_sum;
782
783 DEBUGFUNC("i40e_update_nvm_checksum");
784
785 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
786 if (ret_code == I40E_SUCCESS) {
787 le_sum = CPU_TO_LE16(checksum);
788 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
789 1, &le_sum, TRUE);
790 }
791
792 return ret_code;
793 }
794
795 /**
796 * i40e_validate_nvm_checksum - Validate EEPROM checksum
797 * @hw: pointer to hardware structure
798 * @checksum: calculated checksum
799 *
800 * Performs checksum calculation and validates the NVM SW checksum. If the
801 * caller does not need checksum, the value can be NULL.
802 **/
/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	enum i40e_status_code read_code;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	read_code = __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD,
					 &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;
	/* Propagate a failed checksum-word read instead of comparing
	 * against the default-zero checksum_sr and reporting a bogus
	 * mismatch (the read's return was previously ignored).
	 */
	if (read_code)
		return read_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}
838
839 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
840 struct i40e_nvm_access *cmd,
841 u8 *bytes, int *perrno);
842 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
843 struct i40e_nvm_access *cmd,
844 u8 *bytes, int *perrno);
845 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
846 struct i40e_nvm_access *cmd,
847 u8 *bytes, int *perrno);
848 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
849 struct i40e_nvm_access *cmd,
850 int *perrno);
851 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
852 struct i40e_nvm_access *cmd,
853 int *perrno);
854 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
855 struct i40e_nvm_access *cmd,
856 u8 *bytes, int *perrno);
857 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
858 struct i40e_nvm_access *cmd,
859 u8 *bytes, int *perrno);
860 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
861 struct i40e_nvm_access *cmd,
862 u8 *bytes, int *perrno);
863 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
864 struct i40e_nvm_access *cmd,
865 u8 *bytes, int *perrno);
866 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
867 struct i40e_nvm_access *cmd,
868 u8 *bytes, int *perrno);
/* Extract the module pointer field from an NVM update config word */
static INLINE u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
/* Extract the transaction type field from an NVM update config word */
static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
877
/* Extract the preservation flags field from an NVM update config word */
static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}
883
/* Human-readable names for the NVM update commands, used only for debug
 * logging.  NOTE: this table is indexed directly by the i40e_nvmupd_cmd
 * value returned from i40e_nvmupd_validate_command(), so the entries
 * must stay in exactly the same order as that enum's definition.
 */
static const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
	"I40E_NVMUPD_GET_FEATURES",
};
904
905 /**
906 * i40e_nvmupd_command - Process an NVM update command
907 * @hw: pointer to hardware structure
908 * @cmd: pointer to nvm update command
909 * @bytes: pointer to the data buffer
910 * @perrno: pointer to return error code
911 *
912 * Dispatches command depending on what update state is current
913 **/
i40e_nvmupd_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)914 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
915 struct i40e_nvm_access *cmd,
916 u8 *bytes, int *perrno)
917 {
918 enum i40e_status_code status;
919 enum i40e_nvmupd_cmd upd_cmd;
920
921 DEBUGFUNC("i40e_nvmupd_command");
922
923 /* assume success */
924 *perrno = 0;
925
926 /* early check for status command and debug msgs */
927 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
928
929 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
930 i40e_nvm_update_state_str[upd_cmd],
931 hw->nvmupd_state,
932 hw->nvm_release_on_done, hw->nvm_wait_opcode,
933 cmd->command, cmd->config, cmd->offset, cmd->data_size);
934
935 if (upd_cmd == I40E_NVMUPD_INVALID) {
936 *perrno = -EFAULT;
937 i40e_debug(hw, I40E_DEBUG_NVM,
938 "i40e_nvmupd_validate_command returns %d errno %d\n",
939 upd_cmd, *perrno);
940 }
941
942 /* a status request returns immediately rather than
943 * going into the state machine
944 */
945 if (upd_cmd == I40E_NVMUPD_STATUS) {
946 if (!cmd->data_size) {
947 *perrno = -EFAULT;
948 return I40E_ERR_BUF_TOO_SHORT;
949 }
950
951 bytes[0] = hw->nvmupd_state;
952
953 if (cmd->data_size >= 4) {
954 bytes[1] = 0;
955 *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
956 }
957
958 /* Clear error status on read */
959 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
960 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
961
962 return I40E_SUCCESS;
963 }
964
965 /*
966 * A supported features request returns immediately
967 * rather than going into state machine
968 */
969 if (upd_cmd == I40E_NVMUPD_FEATURES) {
970 if (cmd->data_size < hw->nvmupd_features.size) {
971 *perrno = -EFAULT;
972 return I40E_ERR_BUF_TOO_SHORT;
973 }
974
975 /*
976 * If buffer is bigger than i40e_nvmupd_features structure,
977 * make sure the trailing bytes are set to 0x0.
978 */
979 if (cmd->data_size > hw->nvmupd_features.size)
980 i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
981 cmd->data_size - hw->nvmupd_features.size,
982 I40E_NONDMA_MEM);
983
984 i40e_memcpy(bytes, &hw->nvmupd_features,
985 hw->nvmupd_features.size, I40E_NONDMA_MEM);
986
987 return I40E_SUCCESS;
988 }
989
990 /* Clear status even it is not read and log */
991 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
992 i40e_debug(hw, I40E_DEBUG_NVM,
993 "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
994 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
995 }
996
997 /* Acquire lock to prevent race condition where adminq_task
998 * can execute after i40e_nvmupd_nvm_read/write but before state
999 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
1000 *
1001 * During NVMUpdate, it is observed that lock could be held for
1002 * ~5ms for most commands. However lock is held for ~60ms for
1003 * NVMUPD_CSUM_LCB command.
1004 */
1005 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1006 switch (hw->nvmupd_state) {
1007 case I40E_NVMUPD_STATE_INIT:
1008 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
1009 break;
1010
1011 case I40E_NVMUPD_STATE_READING:
1012 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
1013 break;
1014
1015 case I40E_NVMUPD_STATE_WRITING:
1016 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
1017 break;
1018
1019 case I40E_NVMUPD_STATE_INIT_WAIT:
1020 case I40E_NVMUPD_STATE_WRITE_WAIT:
1021 /* if we need to stop waiting for an event, clear
1022 * the wait info and return before doing anything else
1023 */
1024 if (cmd->offset == 0xffff) {
1025 i40e_nvmupd_clear_wait_state(hw);
1026 status = I40E_SUCCESS;
1027 break;
1028 }
1029
1030 status = I40E_ERR_NOT_READY;
1031 *perrno = -EBUSY;
1032 break;
1033
1034 default:
1035 /* invalid state, should never happen */
1036 i40e_debug(hw, I40E_DEBUG_NVM,
1037 "NVMUPD: no such state %d\n", hw->nvmupd_state);
1038 status = I40E_NOT_SUPPORTED;
1039 *perrno = -ESRCH;
1040 break;
1041 }
1042
1043 i40e_release_spinlock(&hw->aq.arq_spinlock);
1044 return status;
1045 }
1046
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Ownership rule visible in each case below: the NVM semaphore is acquired
 * here, and is either released before returning (single-shot commands, or
 * any failure) or deliberately kept held across the state transition, with
 * nvm_release_on_done telling the event path to release it later.
 **/
static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* single-shot read: acquire, read, release immediately */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-read: keep the semaphore held and move
		 * to READING state on success
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase completes asynchronously: wait for the AQ event
		 * and release the semaphore from the event path
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* standalone write: like erase, release on completion event */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-write: semaphore stays held (no
		 * nvm_release_on_done) until the closing LCB command
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update; a failure with no AQ error
		 * code maps to -EIO since there is nothing to translate
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		/* AdminQ passthrough commands manage their own wait state */
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1187
1188 /**
1189 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1190 * @hw: pointer to hardware structure
1191 * @cmd: pointer to nvm update command buffer
1192 * @bytes: pointer to the data buffer
1193 * @perrno: pointer to return error code
1194 *
1195 * NVM ownership is already held. Process legitimate commands and set any
1196 * change in state; reject all other commands.
1197 **/
i40e_nvmupd_state_reading(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1198 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1199 struct i40e_nvm_access *cmd,
1200 u8 *bytes, int *perrno)
1201 {
1202 enum i40e_status_code status = I40E_SUCCESS;
1203 enum i40e_nvmupd_cmd upd_cmd;
1204
1205 DEBUGFUNC("i40e_nvmupd_state_reading");
1206
1207 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1208
1209 switch (upd_cmd) {
1210 case I40E_NVMUPD_READ_SA:
1211 case I40E_NVMUPD_READ_CON:
1212 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1213 break;
1214
1215 case I40E_NVMUPD_READ_LCB:
1216 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1217 i40e_release_nvm(hw);
1218 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1219 break;
1220
1221 default:
1222 i40e_debug(hw, I40E_DEBUG_NVM,
1223 "NVMUPD: bad cmd %s in reading state.\n",
1224 i40e_nvm_update_state_str[upd_cmd]);
1225 status = I40E_NOT_SUPPORTED;
1226 *perrno = -ESRCH;
1227 break;
1228 }
1229 return status;
1230 }
1231
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands
 **/
static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = FALSE;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continue the multi-write; stay in WRITE_WAIT for the event */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last command in buffer: on success the semaphore is
		 * released from the completion-event path
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = TRUE;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = TRUE;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		/* preserve the original failure so a failed reacquire
		 * reports the first error, not the reacquire error
		 */
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = TRUE;
				goto retry;
			}
		}
	}

	return status;
}
1353
1354 /**
1355 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1356 * @hw: pointer to the hardware structure
1357 **/
i40e_nvmupd_clear_wait_state(struct i40e_hw * hw)1358 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1359 {
1360 i40e_debug(hw, I40E_DEBUG_NVM,
1361 "NVMUPD: clearing wait on opcode 0x%04x\n",
1362 hw->nvm_wait_opcode);
1363
1364 if (hw->nvm_release_on_done) {
1365 i40e_release_nvm(hw);
1366 hw->nvm_release_on_done = FALSE;
1367 }
1368 hw->nvm_wait_opcode = 0;
1369
1370 if (hw->aq.arq_last_status) {
1371 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1372 return;
1373 }
1374
1375 switch (hw->nvmupd_state) {
1376 case I40E_NVMUPD_STATE_INIT_WAIT:
1377 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1378 break;
1379
1380 case I40E_NVMUPD_STATE_WRITE_WAIT:
1381 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1382 break;
1383
1384 default:
1385 break;
1386 }
1387 }
1388
1389 /**
1390 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1391 * @hw: pointer to the hardware structure
1392 * @opcode: the event that just happened
1393 * @desc: AdminQ descriptor
1394 **/
i40e_nvmupd_check_wait_event(struct i40e_hw * hw,u16 opcode,struct i40e_aq_desc * desc)1395 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1396 struct i40e_aq_desc *desc)
1397 {
1398 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1399
1400 if (opcode == hw->nvm_wait_opcode) {
1401 i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1402 aq_desc_len, I40E_NONDMA_TO_NONDMA);
1403 i40e_nvmupd_clear_wait_state(hw);
1404 }
1405 }
1406
1407 /**
1408 * i40e_nvmupd_validate_command - Validate given command
1409 * @hw: pointer to hardware structure
1410 * @cmd: pointer to nvm update command buffer
1411 * @perrno: pointer to return error code
1412 *
1413 * Return one of the valid command types or I40E_NVMUPD_INVALID
1414 **/
i40e_nvmupd_validate_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1415 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1416 struct i40e_nvm_access *cmd,
1417 int *perrno)
1418 {
1419 enum i40e_nvmupd_cmd upd_cmd;
1420 u8 module, transaction;
1421
1422 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1423
1424 /* anything that doesn't match a recognized case is an error */
1425 upd_cmd = I40E_NVMUPD_INVALID;
1426
1427 transaction = i40e_nvmupd_get_transaction(cmd->config);
1428 module = i40e_nvmupd_get_module(cmd->config);
1429
1430 /* limits on data size */
1431 if ((cmd->data_size < 1) ||
1432 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1433 i40e_debug(hw, I40E_DEBUG_NVM,
1434 "i40e_nvmupd_validate_command data_size %d\n",
1435 cmd->data_size);
1436 *perrno = -EFAULT;
1437 return I40E_NVMUPD_INVALID;
1438 }
1439
1440 switch (cmd->command) {
1441 case I40E_NVM_READ:
1442 switch (transaction) {
1443 case I40E_NVM_CON:
1444 upd_cmd = I40E_NVMUPD_READ_CON;
1445 break;
1446 case I40E_NVM_SNT:
1447 upd_cmd = I40E_NVMUPD_READ_SNT;
1448 break;
1449 case I40E_NVM_LCB:
1450 upd_cmd = I40E_NVMUPD_READ_LCB;
1451 break;
1452 case I40E_NVM_SA:
1453 upd_cmd = I40E_NVMUPD_READ_SA;
1454 break;
1455 case I40E_NVM_EXEC:
1456 switch (module) {
1457 case I40E_NVM_EXEC_GET_AQ_RESULT:
1458 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1459 break;
1460 case I40E_NVM_EXEC_FEATURES:
1461 upd_cmd = I40E_NVMUPD_FEATURES;
1462 break;
1463 case I40E_NVM_EXEC_STATUS:
1464 upd_cmd = I40E_NVMUPD_STATUS;
1465 break;
1466 default:
1467 *perrno = -EFAULT;
1468 return I40E_NVMUPD_INVALID;
1469 }
1470 break;
1471 case I40E_NVM_AQE:
1472 upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1473 break;
1474 }
1475 break;
1476
1477 case I40E_NVM_WRITE:
1478 switch (transaction) {
1479 case I40E_NVM_CON:
1480 upd_cmd = I40E_NVMUPD_WRITE_CON;
1481 break;
1482 case I40E_NVM_SNT:
1483 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1484 break;
1485 case I40E_NVM_LCB:
1486 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1487 break;
1488 case I40E_NVM_SA:
1489 upd_cmd = I40E_NVMUPD_WRITE_SA;
1490 break;
1491 case I40E_NVM_ERA:
1492 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1493 break;
1494 case I40E_NVM_CSUM:
1495 upd_cmd = I40E_NVMUPD_CSUM_CON;
1496 break;
1497 case (I40E_NVM_CSUM|I40E_NVM_SA):
1498 upd_cmd = I40E_NVMUPD_CSUM_SA;
1499 break;
1500 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1501 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1502 break;
1503 case I40E_NVM_EXEC:
1504 if (module == 0)
1505 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1506 break;
1507 }
1508 break;
1509 }
1510
1511 return upd_cmd;
1512 }
1513
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer (AQ descriptor followed by its data)
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer. The first
 * sizeof(struct i40e_aq_desc) bytes of @bytes are taken as the descriptor
 * to send; any remaining bytes are its indirect data. cmd->offset, when
 * nonzero, is the AQ opcode of a follow-up event to wait for (0xffff is a
 * no-op used to cancel).
 **/
static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		/* the buffer is allocated once and reused for later calls;
		 * NOTE(review): buff_size derives from the caller-supplied
		 * descriptor datalen while the buffer is asq_buf_size bytes
		 * — presumably i40e_asq_send_command bounds it; confirm.
		 */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				I40E_NONDMA_TO_NONDMA);
		}
	}

	/* clear any stale event descriptor before waiting on a new one */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1598
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer. The result is the
 * concatenation of the writeback descriptor (nvm_wb_desc) and its indirect
 * data (nvm_buff); cmd->offset/data_size select a window into that
 * concatenation, so the copy may span both regions.
 **/
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* window starts inside the descriptor: copy the descriptor
		 * part first, then fall through to the data buffer
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* window starts inside the indirect data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1667
1668 /**
1669 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1670 * @hw: pointer to hardware structure
1671 * @cmd: pointer to nvm update command buffer
1672 * @bytes: pointer to the data buffer
1673 * @perrno: pointer to return error code
1674 *
1675 * cmd structure contains identifiers and data buffer
1676 **/
i40e_nvmupd_get_aq_event(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1677 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1678 struct i40e_nvm_access *cmd,
1679 u8 *bytes, int *perrno)
1680 {
1681 u32 aq_total_len;
1682 u32 aq_desc_len;
1683
1684 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1685
1686 aq_desc_len = sizeof(struct i40e_aq_desc);
1687 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1688
1689 /* check copylength range */
1690 if (cmd->data_size > aq_total_len) {
1691 i40e_debug(hw, I40E_DEBUG_NVM,
1692 "%s: copy length %d too big, trimming to %d\n",
1693 __func__, cmd->data_size, aq_total_len);
1694 cmd->data_size = aq_total_len;
1695 }
1696
1697 i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1698 I40E_NONDMA_TO_NONDMA);
1699
1700 return I40E_SUCCESS;
1701 }
1702
1703 /**
1704 * i40e_nvmupd_nvm_read - Read NVM
1705 * @hw: pointer to hardware structure
1706 * @cmd: pointer to nvm update command buffer
1707 * @bytes: pointer to the data buffer
1708 * @perrno: pointer to return error code
1709 *
1710 * cmd structure contains identifiers and data buffer
1711 **/
i40e_nvmupd_nvm_read(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1712 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1713 struct i40e_nvm_access *cmd,
1714 u8 *bytes, int *perrno)
1715 {
1716 struct i40e_asq_cmd_details cmd_details;
1717 enum i40e_status_code status;
1718 u8 module, transaction;
1719 bool last;
1720
1721 transaction = i40e_nvmupd_get_transaction(cmd->config);
1722 module = i40e_nvmupd_get_module(cmd->config);
1723 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1724
1725 memset(&cmd_details, 0, sizeof(cmd_details));
1726 cmd_details.wb_desc = &hw->nvm_wb_desc;
1727
1728 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1729 bytes, last, &cmd_details);
1730 if (status) {
1731 i40e_debug(hw, I40E_DEBUG_NVM,
1732 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1733 module, cmd->offset, cmd->data_size);
1734 i40e_debug(hw, I40E_DEBUG_NVM,
1735 "i40e_nvmupd_nvm_read status %d aq %d\n",
1736 status, hw->aq.asq_last_status);
1737 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1738 }
1739
1740 return status;
1741 }
1742
1743 /**
1744 * i40e_nvmupd_nvm_erase - Erase an NVM module
1745 * @hw: pointer to hardware structure
1746 * @cmd: pointer to nvm update command buffer
1747 * @perrno: pointer to return error code
1748 *
1749 * module, offset, data_size and data are in cmd structure
1750 **/
i40e_nvmupd_nvm_erase(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1751 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1752 struct i40e_nvm_access *cmd,
1753 int *perrno)
1754 {
1755 enum i40e_status_code status = I40E_SUCCESS;
1756 struct i40e_asq_cmd_details cmd_details;
1757 u8 module, transaction;
1758 bool last;
1759
1760 transaction = i40e_nvmupd_get_transaction(cmd->config);
1761 module = i40e_nvmupd_get_module(cmd->config);
1762 last = (transaction & I40E_NVM_LCB);
1763
1764 memset(&cmd_details, 0, sizeof(cmd_details));
1765 cmd_details.wb_desc = &hw->nvm_wb_desc;
1766
1767 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1768 last, &cmd_details);
1769 if (status) {
1770 i40e_debug(hw, I40E_DEBUG_NVM,
1771 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1772 module, cmd->offset, cmd->data_size);
1773 i40e_debug(hw, I40E_DEBUG_NVM,
1774 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1775 status, hw->aq.asq_last_status);
1776 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1777 }
1778
1779 return status;
1780 }
1781
1782 /**
1783 * i40e_nvmupd_nvm_write - Write NVM
1784 * @hw: pointer to hardware structure
1785 * @cmd: pointer to nvm update command buffer
1786 * @bytes: pointer to the data buffer
1787 * @perrno: pointer to return error code
1788 *
1789 * module, offset, data_size and data are in cmd structure
1790 **/
i40e_nvmupd_nvm_write(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1791 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1792 struct i40e_nvm_access *cmd,
1793 u8 *bytes, int *perrno)
1794 {
1795 enum i40e_status_code status = I40E_SUCCESS;
1796 struct i40e_asq_cmd_details cmd_details;
1797 u8 module, transaction;
1798 u8 preservation_flags;
1799 bool last;
1800
1801 transaction = i40e_nvmupd_get_transaction(cmd->config);
1802 module = i40e_nvmupd_get_module(cmd->config);
1803 last = (transaction & I40E_NVM_LCB);
1804 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1805
1806 memset(&cmd_details, 0, sizeof(cmd_details));
1807 cmd_details.wb_desc = &hw->nvm_wb_desc;
1808
1809 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1810 (u16)cmd->data_size, bytes, last,
1811 preservation_flags, &cmd_details);
1812 if (status) {
1813 i40e_debug(hw, I40E_DEBUG_NVM,
1814 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1815 module, cmd->offset, cmd->data_size);
1816 i40e_debug(hw, I40E_DEBUG_NVM,
1817 "i40e_nvmupd_nvm_write status %d aq %d\n",
1818 status, hw->aq.asq_last_status);
1819 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1820 }
1821
1822 return status;
1823 }
1824