// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside i40e_init_shared_code().
 * Note that the term NVM is used here (and in all functions in this file)
 * as an equivalent of the FLASH part mapped into the Shadow RAM (SR);
 * FLASH is always accessed through the Shadow RAM.
 **/
int i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	int ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode,
	 * as the blank mode may be used on the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = FIELD_GET(I40E_GLNVM_GENS_SR_SIZE_MASK, gens);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = -EIO;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

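/*
 * Worked example of the sr_size arithmetic above (illustrative only):
 * GLNVM_GENS.SR_SIZE encodes the Shadow RAM size as a power of two in KB,
 * so a field value of 4 means BIT(4) = 16 KB. With 2-byte words that is
 * 16 * I40E_SR_WORDS_IN_1KB = 16 * 512 = 8192 words, which is the value
 * stored in nvm->sr_size and used for the bounds checks further below.
 */
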
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for the given access type
 * via the proper Admin Command.
 **/
int i40e_acquire_nvm(struct i40e_hw *hw,
		     enum i40e_aq_resource_access_type access)
{
	u64 gtime, timeout;
	u64 time_left = 0;
	int ret_code = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_acquire_nvm_exit:
	return ret_code;
}

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	u32 total_delay = 0;
	int ret_code = 0;

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((ret_code == -EIO) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		usleep_range(1000, 2000);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID,
						    0, NULL);
		total_delay++;
	}
}

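/*
 * Illustrative call pattern for the acquire/release pair above (a sketch
 * mirroring what i40e_validate_nvm_checksum() does later in this file,
 * not an additional API):
 *
 *	u16 word;
 *	int err = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *
 *	if (!err) {
 *		err = __i40e_read_nvm_word(hw, offset, &word);
 *		i40e_release_nvm(hw);
 *	}
 *
 * The lock-free __i40e_read_nvm_word() variant may only be used between a
 * successful acquire and the matching release.
 */
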
/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	int ret_code = -EIO;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == -EIO)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
				    u16 *data)
{
	int ret_code = -EIO;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = -EINVAL;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = FIELD_GET(I40E_GLNVM_SRDATA_RDDATA_MASK,
					  sr_reg);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

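/*
 * Note on the double poll in i40e_read_nvm_word_srctl(): the first
 * i40e_poll_sr_srctl_done_bit() call makes sure any previous SRCTL
 * transaction has drained before a new address/START is written; the
 * second waits for the word just requested, after which GLNVM_SRDATA
 * holds the result. Each poll gives the hardware up to
 * I40E_SRRD_SRCTL_ATTEMPTS * 5 usecs to complete.
 */
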
/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer with words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
static int i40e_read_nvm_aq(struct i40e_hw *hw,
			    u8 module_pointer, u32 offset,
			    u16 words, void *data,
			    bool last_command)
{
	struct i40e_asq_cmd_details cmd_details;
	int ret_code = -EIO;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

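/*
 * Worked example of the sector checks above (illustrative numbers): one
 * 4 KB sector holds I40E_SR_SECTOR_SIZE_IN_WORDS = 0x800 16-bit words. A
 * read of words = 4 at offset = 0x7fe fails the third check because the
 * first and last word land in different sectors:
 *
 *	(0x7fe + 3) / 0x800 = 1  !=  0x7fe / 0x800 = 0
 *
 * whereas the same 4 words at offset = 0x7fc stay inside sector 0 and are
 * forwarded to i40e_aq_read_nvm() with byte offset 2 * 0x7fc and byte
 * length 2 * 4.
 */
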
/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ
 **/
static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
				 u16 *data)
{
	int ret_code = -EIO;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
static int __i40e_read_nvm_word(struct i40e_hw *hw,
				u16 offset, u16 *data)
{
	if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
		return i40e_read_nvm_word_aq(hw, offset, data);

	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word - Reads nvm word and acquires the lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 **/
int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
		       u16 *data)
{
	int ret_code = 0;

	if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;

	ret_code = __i40e_read_nvm_word(hw, offset, data);

	if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
		i40e_release_nvm(hw);

	return ret_code;
}

/**
 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 * @hw: Pointer to the HW structure
 * @module_ptr: Pointer to module in words with respect to NVM beginning
 * @module_offset: Offset in words from module start
 * @data_offset: Offset in words from the data area start
 * @words_data_size: Words to read from NVM
 * @data_ptr: Pointer to memory location where resulting buffer will be stored
 **/
int i40e_read_nvm_module_data(struct i40e_hw *hw,
			      u8 module_ptr,
			      u16 module_offset,
			      u16 data_offset,
			      u16 words_data_size,
			      u16 *data_ptr)
{
	u16 specific_ptr = 0;
	u16 ptr_value = 0;
	u32 offset = 0;
	int status;

	if (module_ptr != 0) {
		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed. Error code: %d.\n",
				   status);
			return -EIO;
		}
	}
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
#define I40E_NVM_INVALID_VAL 0xFFFF

	/* Pointer not initialized */
	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
	    ptr_value == I40E_NVM_INVALID_VAL) {
		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
		return -EINVAL;
	}

	/* Check whether the module is in SR mapped area or outside */
	if (ptr_value & I40E_PTR_TYPE) {
		/* Pointer points outside of the Shadow RAM mapped area */
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm data failed. Pointer points outside of the Shadow RAM mapped area.\n");

		return -EINVAL;
	}

	/* Read from the Shadow RAM */

	status = i40e_read_nvm_word(hw, ptr_value + module_offset,
				    &specific_ptr);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm word failed. Error code: %d.\n",
			   status);
		return -EIO;
	}

	offset = ptr_value + module_offset + specific_ptr + data_offset;

	status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
				      data_ptr);
	if (status)
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm buffer failed. Error code: %d.\n",
			   status);

	return status;
}

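/*
 * Illustrative walk of the pointer chain in i40e_read_nvm_module_data()
 * (all values hypothetical): if the SR word at module_ptr reads
 * ptr_value = 0x200, then the word at 0x200 + module_offset supplies
 * specific_ptr, and the final buffer read starts at
 *
 *	offset = ptr_value + module_offset + specific_ptr + data_offset;
 *
 * i.e. the module pointer, the per-module table entry and the data offset
 * are all expressed in 16-bit SR words and simply accumulate.
 */
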
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time via the
 * GLNVM_SRCTL register. The caller is responsible for any required locking.
 **/
static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
				      u16 *words, u16 *data)
{
	int ret_code = 0;
	u16 index, word;

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the request on sector boundaries as needed. The caller
 * is responsible for any required locking.
 **/
static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
				   u16 *words, u16 *data)
{
	bool last_cmd = false;
	u16 words_read = 0;
	u16 read_size;
	int ret_code;
	u16 i = 0;

	do {
		/* Calculate the number of words we should read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is the last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

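/*
 * Worked example of the chunking above (illustrative numbers, 0x800-word
 * sectors): a request for *words = 0x30 at offset = 0x7f0 is issued as two
 * AQ reads, 0x10 words (0x7f0..0x7ff) to finish the current sector and then
 * 0x20 words (0x800..0x81f), with last_cmd set only on the second read.
 * Only after the whole buffer has arrived are the words byte-swapped in
 * place from little-endian.
 */
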
/**
 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the AQ or the SRCTL
 * method, depending on the device capabilities.
 **/
static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
				  u16 offset, u16 *words,
				  u16 *data)
{
	if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR. On the AQ path the buffer
 * read is preceded by the NVM ownership take and followed by the release.
 **/
int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
			 u16 *words, u16 *data)
{
	int ret_code = 0;

	if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (!ret_code) {
			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
							   data);
			i40e_release_nvm(hw);
		}
	} else {
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
	}

	return ret_code;
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
			     u32 offset, u16 words, void *data,
			     bool last_command)
{
	struct i40e_asq_cmd_details cmd_details;
	int ret_code = -EIO;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector) in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of VPD is customer specific and unknown, so this function skips the
 * maximum possible VPD size (1kB).
 **/
static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
				  u16 *checksum)
{
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	int ret_code;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = -EIO;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = -EIO;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = -EIO;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

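/*
 * The checksum relation used above and verified by
 * i40e_validate_nvm_checksum() below: the stored word satisfies
 *
 *	checksum_sr == (u16)I40E_SR_SW_CHECKSUM_BASE - sum(covered words)
 *
 * so a recomputed value over the same coverage (everything except the
 * checksum word itself, up to 1 kB of VPD and the PCIe ALT module) must
 * reproduce the stored word exactly; any flipped word breaks the equality.
 */
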
/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
int i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	__le16 le_sum;
	int ret_code;
	u16 checksum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, the parameter can be NULL.
 **/
int i40e_validate_nvm_checksum(struct i40e_hw *hw,
			       u16 *checksum)
{
	u16 checksum_local = 0;
	u16 checksum_sr = 0;
	int ret_code = 0;

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = -EIO;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}

static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return FIELD_GET(I40E_NVM_TRANS_MASK, val);
}

static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return FIELD_GET(I40E_NVM_PRESERVATION_FLAGS_MASK, val);
}

static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};

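/*
 * How the helpers above carve up a user-supplied cmd->config word (an
 * illustrative decoding; the mask values live in the i40e headers): one
 * field selects the module pointer (I40E_NVM_MOD_PNT_MASK), a transaction
 * field (I40E_NVM_TRANS_MASK) selects CON/SNT/LCB/SA/ERA/CSUM, and
 * I40E_NVM_PRESERVATION_FLAGS_MASK carries the preservation mode for
 * writes. For example, an I40E_NVM_READ command whose transaction field
 * decodes to I40E_NVM_SA is classified as I40E_NVMUPD_READ_SA by
 * i40e_nvmupd_validate_command() below.
 */
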
/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd
i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd,
			     int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s data_size %d\n", __func__, cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		case I40E_NVM_AQE:
			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	int status = 0;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s mod 0x%x off 0x%x len 0x%x\n",
			   __func__, module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s status %d aq %d\n",
			   __func__, status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
				 struct i40e_nvm_access *cmd,
				 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	u8 preservation_flags;
	int status = 0;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);
	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    preservation_flags, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s mod 0x%x off 0x%x len 0x%x\n",
			   __func__, module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s status %d aq %d\n",
			   __func__, status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	int status;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s mod 0x%x off 0x%x len 0x%x\n",
			   __func__, module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s status %d aq %d\n",
			   __func__, status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
			       struct i40e_nvm_access *cmd,
			       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;
	int status;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	if (cmd->offset == 0xffff)
		return 0;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return -EINVAL;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s err %pe aq_err %s\n",
			   __func__, ERR_PTR(status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

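/*
 * Layout of the user buffer consumed by i40e_nvmupd_exec_aq() and produced
 * by i40e_nvmupd_get_aq_result() (derived from the parsing above):
 *
 *	bytes[0 .. sizeof(struct i40e_aq_desc) - 1]   raw AQ descriptor
 *	bytes[sizeof(struct i40e_aq_desc) .. ]        optional indirect data
 *
 * On the way in, cmd->offset doubles as the AQ opcode to wait for (0xffff
 * means "stop waiting"); on the way out, cmd->offset is a byte offset into
 * the descriptor-plus-data image captured in nvm_wb_desc/nvm_buff.
 */
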
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return -EINVAL;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

/**
 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
				    struct i40e_nvm_access *cmd,
				    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);

	/* check copylength range */
	if (cmd->data_size > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, aq_total_len);
		cmd->data_size = aq_total_len;
	}

	memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);

	return 0;
}

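/*
 * Overview of the NVM update state machine driven by the handlers below
 * (a sketch of the transitions actually coded, not an extra interface):
 *
 *	INIT --READ_SNT--> READING --READ_LCB--> INIT
 *	INIT --WRITE_SNT--> WRITE_WAIT
 *	INIT --WRITE_SA/WRITE_ERA/CSUM_SA/EXEC_AQ--> INIT_WAIT
 *	WRITING --WRITE_CON/CSUM_CON--> WRITE_WAIT
 *	WRITING --WRITE_LCB/CSUM_LCB--> INIT_WAIT
 *
 * The *_WAIT states are left via i40e_nvmupd_clear_wait_state() when the
 * awaited AdminQ completion event arrives (or the user aborts with
 * cmd->offset == 0xffff): INIT_WAIT falls back to INIT and WRITE_WAIT to
 * WRITING.
 */
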
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static int i40e_nvmupd_state_init(struct i40e_hw *hw,
				  struct i40e_nvm_access *cmd,
				  u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	int status = 0;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = -EIO;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	int status = 0;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = -EOPNOTSUPP;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
				     struct i40e_nvm_access *cmd,
				     u8 *bytes, int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;
	int status = 0;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = -EOPNOTSUPP;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
	    !retry_attempt) {
		u32 old_asq_status = hw->aq.asq_last_status;
		int old_status = status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

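/*
 * Reply layout of the I40E_NVMUPD_STATUS fast path handled at the top of
 * i40e_nvmupd_command() below (derived from the code, noted here for
 * reference):
 *
 *	bytes[0]      current nvmupd_state
 *	bytes[1]      0 (reserved), only written when data_size >= 4
 *	bytes[2..3]   nvm_wait_opcode as a host-order u16
 *
 * Reading the status also clears a latched I40E_NVMUPD_STATE_ERROR back to
 * I40E_NVMUPD_STATE_INIT.
 */
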
1546 */ 1547 mutex_lock(&hw->aq.arq_mutex); 1548 switch (hw->nvmupd_state) { 1549 case I40E_NVMUPD_STATE_INIT: 1550 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); 1551 break; 1552 1553 case I40E_NVMUPD_STATE_READING: 1554 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); 1555 break; 1556 1557 case I40E_NVMUPD_STATE_WRITING: 1558 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); 1559 break; 1560 1561 case I40E_NVMUPD_STATE_INIT_WAIT: 1562 case I40E_NVMUPD_STATE_WRITE_WAIT: 1563 /* if we need to stop waiting for an event, clear 1564 * the wait info and return before doing anything else 1565 */ 1566 if (cmd->offset == 0xffff) { 1567 i40e_nvmupd_clear_wait_state(hw); 1568 status = 0; 1569 break; 1570 } 1571 1572 status = -EBUSY; 1573 *perrno = -EBUSY; 1574 break; 1575 1576 default: 1577 /* invalid state, should never happen */ 1578 i40e_debug(hw, I40E_DEBUG_NVM, 1579 "NVMUPD: no such state %d\n", hw->nvmupd_state); 1580 status = -EOPNOTSUPP; 1581 *perrno = -ESRCH; 1582 break; 1583 } 1584 1585 mutex_unlock(&hw->aq.arq_mutex); 1586 return status; 1587 } 1588 1589 /** 1590 * i40e_nvmupd_clear_wait_state - clear wait state on hw 1591 * @hw: pointer to the hardware structure 1592 **/ 1593 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) 1594 { 1595 i40e_debug(hw, I40E_DEBUG_NVM, 1596 "NVMUPD: clearing wait on opcode 0x%04x\n", 1597 hw->nvm_wait_opcode); 1598 1599 if (hw->nvm_release_on_done) { 1600 i40e_release_nvm(hw); 1601 hw->nvm_release_on_done = false; 1602 } 1603 hw->nvm_wait_opcode = 0; 1604 1605 if (hw->aq.arq_last_status) { 1606 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; 1607 return; 1608 } 1609 1610 switch (hw->nvmupd_state) { 1611 case I40E_NVMUPD_STATE_INIT_WAIT: 1612 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 1613 break; 1614 1615 case I40E_NVMUPD_STATE_WRITE_WAIT: 1616 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; 1617 break; 1618 1619 default: 1620 break; 1621 } 1622 } 1623 1624 /** 1625 * i40e_nvmupd_check_wait_event - handle NVM update operation events 1626 * @hw: pointer to the hardware structure 1627 * @opcode: the event that just happened 1628 * @desc: AdminQ descriptor 1629 **/ 1630 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, 1631 struct i40e_aq_desc *desc) 1632 { 1633 u32 aq_desc_len = sizeof(struct i40e_aq_desc); 1634 1635 if (opcode == hw->nvm_wait_opcode) { 1636 memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); 1637 i40e_nvmupd_clear_wait_state(hw); 1638 } 1639 } 1640