/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_prototype.h"

enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
                                               u16 *data);
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
                                            u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
                                                 u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
                                              u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                       u32 offset, u16 words, void *data,
                                       bool last_command);

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Set up the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Note that the term NVM is used here (and in all methods covered in this
 * file) as an equivalent of the FLASH part mapped into the SR; FLASH is
 * always accessed through the Shadow RAM.
 **/
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
{
        struct i40e_nvm_info *nvm = &hw->nvm;
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 fla, gens;
        u8 sr_size;

        DEBUGFUNC("i40e_init_nvm");

        /* The SR size is stored regardless of the NVM programming mode
         * as the blank mode may be used in the factory line.
         */
        gens = rd32(hw, I40E_GLNVM_GENS);
        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
                   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
        /* Switching to words (sr_size contains power of 2KB) */
        nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

        /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, I40E_GLNVM_FLA);
        if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
                /* Max NVM timeout */
                nvm->timeout = I40E_MAX_NVM_TIMEOUT;
                nvm->blank_nvm_mode = FALSE;
        } else { /* Blank programming mode */
                nvm->blank_nvm_mode = TRUE;
                ret_code = I40E_ERR_NVM_BLANK_MODE;
                i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
        }

        return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership via the proper Admin Command.
 **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
                                       enum i40e_aq_resource_access_type access)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u64 gtime, timeout;
        u64 time_left = 0;

        DEBUGFUNC("i40e_acquire_nvm");

        if (hw->nvm.blank_nvm_mode)
                goto i40e_acquire_nvm_exit;

        ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
                                            0, &time_left, NULL);
        /* Reading the Global Device Timer */
        gtime = rd32(hw, I40E_GLVFGEN_TIMER);

        /* Store the timeout */
        hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

        if (ret_code)
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
                           access, time_left, ret_code, hw->aq.asq_last_status);

        if (ret_code && time_left) {
                /* Poll until the current NVM owner times out */
                timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
                while ((gtime < timeout) && time_left) {
                        i40e_msec_delay(10);
                        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
                        ret_code = i40e_aq_request_resource(hw,
                                                        I40E_NVM_RESOURCE_ID,
                                                        access, 0, &time_left,
                                                        NULL);
                        if (ret_code == I40E_SUCCESS) {
                                hw->nvm.hw_semaphore_timeout =
                                        I40E_MS_TO_GTIME(time_left) + gtime;
                                break;
                        }
                }
                if (ret_code != I40E_SUCCESS) {
                        hw->nvm.hw_semaphore_timeout = 0;
                        i40e_debug(hw, I40E_DEBUG_NVM,
                                   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
                                   time_left, ret_code, hw->aq.asq_last_status);
                }
        }

i40e_acquire_nvm_exit:
        return ret_code;
}

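/* Illustrative usage sketch (not part of the driver): the acquire/release
 * pair above brackets NVM accesses. A hypothetical caller that reads the
 * software checksum word could look like the following; the error-handling
 * style is an assumption of the example, only the i40e_* calls and
 * identifiers are real.
 *
 *        enum i40e_status_code status;
 *        u16 checksum_word = 0;
 *
 *        status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *        if (status == I40E_SUCCESS) {
 *                status = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD,
 *                                            &checksum_word);
 *                i40e_release_nvm(hw);
 *        }
 */
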
/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release the NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 total_delay = 0;

        DEBUGFUNC("i40e_release_nvm");

        if (hw->nvm.blank_nvm_mode)
                return;

        ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

        /* there are some rare cases when trying to release the resource
         * results in an admin Q timeout, so handle them correctly
         */
        while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
               (total_delay < hw->aq.asq_cmd_timeout)) {
                i40e_msec_delay(1);
                ret_code = i40e_aq_release_resource(hw,
                                                    I40E_NVM_RESOURCE_ID,
                                                    0, NULL);
                total_delay++;
        }
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
        u32 srctl, wait_cnt;

        DEBUGFUNC("i40e_poll_sr_srctl_done_bit");

        /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
        for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
                srctl = rd32(hw, I40E_GLNVM_SRCTL);
                if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
                        ret_code = I40E_SUCCESS;
                        break;
                }
                i40e_usec_delay(5);
        }
        if (ret_code == I40E_ERR_TIMEOUT)
                i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
        return ret_code;
}

/**
 * i40e_read_nvm_word - Reads Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data)
{
        return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
                                               u16 *data)
{
        enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
        u32 sr_reg;

        DEBUGFUNC("i40e_read_nvm_word_srctl");

        if (offset >= hw->nvm.sr_size) {
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
                           offset, hw->nvm.sr_size);
                ret_code = I40E_ERR_PARAM;
                goto read_nvm_exit;
        }

        /* Poll the done bit first */
        ret_code = i40e_poll_sr_srctl_done_bit(hw);
        if (ret_code == I40E_SUCCESS) {
                /* Write the address and start reading */
                sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
                         BIT(I40E_GLNVM_SRCTL_START_SHIFT);
                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

                /* Poll I40E_GLNVM_SRCTL until the done bit is set */
                ret_code = i40e_poll_sr_srctl_done_bit(hw);
                if (ret_code == I40E_SUCCESS) {
                        sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
                        *data = (u16)((sr_reg &
                                       I40E_GLNVM_SRDATA_RDDATA_MASK)
                                      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
                }
        }
        if (ret_code != I40E_SUCCESS)
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
                           offset);

read_nvm_exit:
        return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 **/
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
                                            u16 *data)
{
        enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;

        DEBUGFUNC("i40e_read_nvm_word_aq");

        ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
        *data = LE16_TO_CPU(*(__le16 *)data);

        return ret_code;
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the
 * i40e_read_nvm_buffer_srctl() method. The buffer read is preceded by the
 * NVM ownership take and followed by the release.
 **/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                           u16 *words, u16 *data)
{
        return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}

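/* Illustrative sketch (not part of the driver): because @words is both an
 * input and an output, a caller should check how many words actually came
 * back. The offset and buffer length below are assumptions made for the
 * example; only the i40e_read_nvm_buffer() call itself is real.
 *
 *        enum i40e_status_code status;
 *        u16 buf[16];
 *        u16 nwords = 16;
 *        u16 sr_offset = 0x100;        // hypothetical Shadow RAM word offset
 *
 *        status = i40e_read_nvm_buffer(hw, sr_offset, &nwords, buf);
 *        // on return, nwords holds the number of words actually read,
 *        // which may be less than 16 if the read stopped early
 */
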
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the
 * i40e_read_nvm_word_srctl() method. The buffer read is preceded by the
 * NVM ownership take and followed by the release.
 **/
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
                                                 u16 *words, u16 *data)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 index, word;

        DEBUGFUNC("i40e_read_nvm_buffer_srctl");

        /* Loop thru the selected region */
        for (word = 0; word < *words; word++) {
                index = offset + word;
                ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
                if (ret_code != I40E_SUCCESS)
                        break;
        }

        /* Update the number of words read from the Shadow RAM */
        *words = word;

        return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
                                              u16 *words, u16 *data)
{
        enum i40e_status_code ret_code;
        u16 read_size = *words;
        bool last_cmd = FALSE;
        u16 words_read = 0;
        u16 i = 0;

        DEBUGFUNC("i40e_read_nvm_buffer_aq");

        do {
                /* Calculate the number of words we should read in this step.
                 * The FVL AQ does not allow reading more than one page at a
                 * time or crossing page boundaries.
                 */
                if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
                        read_size = min(*words,
                                        (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
                                      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
                else
                        read_size = min((*words - words_read),
                                        I40E_SR_SECTOR_SIZE_IN_WORDS);

                /* Check if this is last command, if so set proper flag */
                if ((words_read + read_size) >= *words)
                        last_cmd = TRUE;

                ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
                                            data + words_read, last_cmd);
                if (ret_code != I40E_SUCCESS)
                        goto read_nvm_buffer_aq_exit;

                /* Increment counter for words already read and move offset to
                 * new read location
                 */
                words_read += read_size;
                offset += read_size;
        } while (words_read < *words);

        for (i = 0; i < *words; i++)
                data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
        *words = words_read;
        return ret_code;
}

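/* Worked example (illustrative, assuming I40E_SR_SECTOR_SIZE_IN_WORDS is
 * 0x800, i.e. one 4KB sector): a call to i40e_read_nvm_buffer_aq() with
 * offset = 0x7F0 and *words = 0x40 is split so that no single AQ read
 * crosses a sector boundary:
 *
 *        1st iteration: offset 0x7F0, read_size = 0x800 - 0x7F0 = 0x10 words
 *        2nd iteration: offset 0x800, read_size = 0x40 - 0x10  = 0x30 words,
 *                       issued with last_command = TRUE
 */
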
/**
 * i40e_read_nvm_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer for the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                       u32 offset, u16 words, void *data,
                                       bool last_command)
{
        enum i40e_status_code ret_code = I40E_ERR_NVM;
        struct i40e_asq_cmd_details cmd_details;

        DEBUGFUNC("i40e_read_nvm_aq");

        memset(&cmd_details, 0, sizeof(cmd_details));
        cmd_details.wb_desc = &hw->nvm_wb_desc;

        /* Here we are checking the SR limit only for the flat memory model.
         * We cannot do it for the module-based model, as we did not acquire
         * the NVM resource yet (we cannot get the module pointer value).
         * Firmware will check the module-based model.
         */
        if ((offset + words) > hw->nvm.sr_size)
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVM read error: offset %d beyond Shadow RAM limit %d\n",
                           (offset + words), hw->nvm.sr_size);
        else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
                /* We can read only up to 4KB (one sector) in one AQ read */
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVM read fail error: tried to read %d words, limit is %d.\n",
                           words, I40E_SR_SECTOR_SIZE_IN_WORDS);
        else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
                 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
                /* A single read cannot spread over two sectors */
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
                           offset, words);
        else
                ret_code = i40e_aq_read_nvm(hw, module_pointer,
                                            2 * offset,  /*bytes*/
                                            2 * words,   /*bytes*/
                                            data, last_command, &cmd_details);

        return ret_code;
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                        u32 offset, u16 words, void *data,
                                        bool last_command)
{
        enum i40e_status_code ret_code = I40E_ERR_NVM;
        struct i40e_asq_cmd_details cmd_details;

        DEBUGFUNC("i40e_write_nvm_aq");

        memset(&cmd_details, 0, sizeof(cmd_details));
        cmd_details.wb_desc = &hw->nvm_wb_desc;

        /* Here we are checking the SR limit only for the flat memory model.
         * We cannot do it for the module-based model, as we did not acquire
         * the NVM resource yet (we cannot get the module pointer value).
         * Firmware will check the module-based model.
         */
        if ((offset + words) > hw->nvm.sr_size)
                DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
        else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
                /* We can write only up to 4KB (one sector), in one AQ write */
                DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
        else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
                 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
                /* A single write cannot spread over two sectors */
                DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
        else
                ret_code = i40e_aq_update_nvm(hw, module_pointer,
                                              2 * offset,  /*bytes*/
                                              2 * words,   /*bytes*/
                                              data, last_command, &cmd_details);

        return ret_code;
}

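/* Worked example (illustrative): both AQ helpers above take word offsets and
 * word counts, while the AdminQ commands operate on bytes, hence the "2 *"
 * conversions. For instance, reading 0x10 words at word offset 0x200 issues
 * an AQ NVM read of 0x20 bytes at byte offset 0x400.
 */
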
/**
 * i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership has to be acquired and released (on ARQ completion event
 * reception) by the caller. To commit the SR to NVM, the update checksum
 * function should be called.
 **/
enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
                                          void *data)
{
        DEBUGFUNC("i40e_write_nvm_word");

        *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

        /* Value 0x00 below means that we treat SR as a flat mem */
        return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
}

/**
 * i40e_write_nvm_buffer - Writes Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller. To commit the SR to NVM,
 * the update checksum function should be called.
 **/
enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
                                            u8 module_pointer, u32 offset,
                                            u16 words, void *data)
{
        __le16 *le_word_ptr = (__le16 *)data;
        u16 *word_ptr = (u16 *)data;
        u32 i = 0;

        DEBUGFUNC("i40e_write_nvm_buffer");

        for (i = 0; i < words; i++)
                le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);

        /* Here we will only write one buffer as the size of the modules
         * mirrored in the Shadow RAM is always less than 4K.
         */
        return i40e_write_nvm_aq(hw, module_pointer, offset, words,
                                 data, FALSE);
}

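/* Illustrative write-transaction sketch (not part of the driver): the doc
 * comments above describe the expected sequence. A hypothetical caller that
 * updates a single Shadow RAM word, recomputes the software checksum, and
 * releases ownership could look like this; the word offset and value are
 * assumptions of the example, only the i40e_* calls are real.
 *
 *        enum i40e_status_code status;
 *        u16 value = 0x1234;             // hypothetical word to store
 *        u32 word_offset = 0x100;        // hypothetical Shadow RAM offset
 *
 *        status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 *        if (status == I40E_SUCCESS) {
 *                status = i40e_write_nvm_word(hw, word_offset, &value);
 *                if (status == I40E_SUCCESS)
 *                        status = i40e_update_nvm_checksum(hw);
 *                i40e_release_nvm(hw);
 *        }
 */
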
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of
 * the VPD area are customer specific and unknown, so this function skips the
 * maximum possible VPD size (1kB).
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_virt_mem vmem;
        u16 pcie_alt_module = 0;
        u16 checksum_local = 0;
        u16 vpd_module = 0;
        u16 *data;
        u16 i = 0;

        DEBUGFUNC("i40e_calc_nvm_checksum");

        ret_code = i40e_allocate_virt_mem(hw, &vmem,
                                I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
        if (ret_code)
                goto i40e_calc_nvm_checksum_exit;
        data = (u16 *)vmem.va;

        /* read pointer to VPD area */
        ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
        if (ret_code != I40E_SUCCESS) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        }

        /* read pointer to PCIe Alt Auto-load module */
        ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
                                      &pcie_alt_module);
        if (ret_code != I40E_SUCCESS) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        }

        /* Calculate SW checksum that covers the whole 64kB shadow RAM
         * except the VPD and PCIe ALT Auto-load modules
         */
        for (i = 0; i < hw->nvm.sr_size; i++) {
                /* Read SR page */
                if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
                        u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

                        ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
                        if (ret_code != I40E_SUCCESS) {
                                ret_code = I40E_ERR_NVM_CHECKSUM;
                                goto i40e_calc_nvm_checksum_exit;
                        }
                }

                /* Skip Checksum word */
                if (i == I40E_SR_SW_CHECKSUM_WORD)
                        continue;
                /* Skip VPD module (convert byte size to word count) */
                if ((i >= (u32)vpd_module) &&
                    (i < ((u32)vpd_module +
                          (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
                        continue;
                }
                /* Skip PCIe ALT module (convert byte size to word count) */
                if ((i >= (u32)pcie_alt_module) &&
                    (i < ((u32)pcie_alt_module +
                          (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
                        continue;
                }

                checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
        }

        *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
        i40e_free_virt_mem(hw, &vmem);
        return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller.
 * This function will commit the SR to NVM.
 **/
enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 checksum;
        __le16 le_sum;

        DEBUGFUNC("i40e_update_nvm_checksum");

        ret_code = i40e_calc_nvm_checksum(hw, &checksum);
        le_sum = CPU_TO_LE16(checksum);
        if (ret_code == I40E_SUCCESS)
                ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
                                             1, &le_sum, TRUE);

        return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
                                                 u16 *checksum)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 checksum_sr = 0;
        u16 checksum_local = 0;

        DEBUGFUNC("i40e_validate_nvm_checksum");

        ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
        if (ret_code != I40E_SUCCESS)
                goto i40e_validate_nvm_checksum_exit;

        /* Do not use i40e_read_nvm_word() because we do not want to take
         * the synchronization semaphores twice here.
         */
        i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

        /* Verify read checksum from EEPROM is the same as
         * calculated checksum
         */
        if (checksum_local != checksum_sr)
                ret_code = I40E_ERR_NVM_CHECKSUM;

        /* If the user cares, return the calculated checksum */
        if (checksum)
                *checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
        return ret_code;
}
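
/* Illustrative validation sketch (not part of the driver): a hypothetical
 * caller that only wants a pass/fail answer can pass NULL for @checksum.
 * The acquire/release bracketing follows the usage described for the other
 * AQ-based helpers in this file and is an assumption of the example.
 *
 *        enum i40e_status_code status;
 *
 *        status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *        if (status == I40E_SUCCESS) {
 *                status = i40e_validate_nvm_checksum(hw, NULL);
 *                i40e_release_nvm(hw);
 *        }
 *        // status == I40E_ERR_NVM_CHECKSUM indicates a checksum mismatch
 */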