/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"

/**
 * ice_aq_read_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands (0x0701)
 */
enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
		void *data, bool last_command, bool read_shadow_ram,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	if (offset > ICE_AQC_NVM_MAX_OFFSET)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
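	/* The AQ read offset is 24 bits wide: the low 16 bits are carried in
	 * offset_low and the next 8 bits in offset_high, which is why offsets
	 * above ICE_AQC_NVM_MAX_OFFSET were rejected earlier.
	 */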
	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = CPU_TO_LE16(length);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors and ensures that no single
 * read request exceeds the maximum 4KB read for a single AdminQ command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4KB.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		last_cmd = !(bytes_read + read_size < inlen);

		/* ice_aq_read_nvm takes the length as a u16. Our read_size is
		 * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum
		 * size guarantees that it will fit within the 2 bytes.
		 */
		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, (u16)read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	*length = bytes_read;
	return status;
}
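
/*
 * Illustrative sketch (not part of the driver): a caller that wants the first
 * 32 bytes of the Shadow RAM as a flat buffer, and that already owns the NVM
 * resource, could use ice_read_flat_nvm() like this:
 *
 *	u8 buf[32];
 *	u32 len = sizeof(buf);
 *	enum ice_status status;
 *
 *	status = ice_read_flat_nvm(hw, 0, &len, buf, true);
 *
 * On success, len holds the number of bytes actually read into buf.
 */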

/**
 * ice_aq_update_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be written (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @command_flags: command parameters
 * @cd: pointer to command details structure or NULL
 *
 * Update the NVM using the admin queue commands (0x0703)
 */
enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
		  u16 length, void *data, bool last_command, u8 command_flags,
		  struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	/* The highest byte of the offset must be zero. */
	if (offset & 0xFF000000)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);

	cmd->cmd_flags |= command_flags;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = CPU_TO_LE16(length);
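
	/* Unlike the read command above, the update command attaches a buffer
	 * that the firmware consumes, so the descriptor is flagged with
	 * ICE_AQ_FLAG_RD before sending.
	 */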
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_aq_erase_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @cd: pointer to command details structure or NULL
 *
 * Erase the NVM sector using the admin queue commands (0x0702)
 */
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;
	enum ice_status status;
	__le16 len;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Read the module length from the Shadow RAM: a module_typeid of 0
	 * selects a flat Shadow RAM read, the offset converts the word index
	 * of the module's size field into bytes, and both last_command and
	 * read_shadow_ram are set to true.
	 */
	status = ice_aq_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
				 true, NULL);
	if (status)
		return status;

	cmd = &desc.params.nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);

	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->length = len;
	cmd->offset_low = 0;
	cmd->offset_high = 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_read_nvm_cfg - read an NVM config block
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM access admin command bits
 * @field_id: field or feature ID
 * @data: buffer for result
 * @buf_size: buffer size
 * @elem_count: pointer to count of elements read by FW
 * @cd: pointer to command details structure or NULL
 *
 * Reads single or multiple feature/field ID and data (0x0704)
 */
enum ice_status
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
		    u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read);

	cmd->cmd_flags = cmd_flags;
	cmd->id = CPU_TO_LE16(field_id);

	status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
	if (!status && elem_count)
		*elem_count = LE16_TO_CPU(cmd->count);

	return status;
}

/**
 * ice_aq_write_nvm_cfg - write an NVM config block
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM access admin command bits
 * @data: buffer with the data to write
 * @buf_size: buffer size
 * @elem_count: count of elements to be written
 * @cd: pointer to command details structure or NULL
 *
 * Writes single or multiple feature/field ID and data (0x0705)
 */
enum ice_status
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
		     u16 elem_count, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_cfg *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->count = CPU_TO_LE16(elem_count);
	cmd->cmd_flags = cmd_flags;

	return ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
}

/**
 * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
 * @hw: pointer to the HW structure
 * @offset: offset in words from module start
 * @words: number of words to access
 */
static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
	if ((offset + words) > hw->flash.sr_words) {
		ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond Shadow RAM limit.\n");
		return ICE_ERR_PARAM;
	}

	if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
		/* We can access only up to 4KB (one sector), in one AQ write */
		ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n",
			  words, ICE_SR_SECTOR_SIZE_IN_WORDS);
		return ICE_ERR_PARAM;
	}

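	/* The first and last word of the access must fall in the same 4KB
	 * sector; comparing the sector index of each catches any access that
	 * would straddle a sector boundary.
	 */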
	if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
	    (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
		/* A single access cannot spread over two sectors */
		ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n");
		return ICE_ERR_PARAM;
	}

	return ICE_SUCCESS;
}

/**
 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
 */
enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
	u32 bytes = sizeof(u16);
	enum ice_status status;
	__le16 data_local;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Note that ice_read_flat_nvm checks if the read is past the Shadow
	 * RAM size, and ensures we don't read across a Shadow RAM sector
	 * boundary
	 */
	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
				   (_FORCE_ u8 *)&data_local, true);
	if (status)
		return status;

	*data = LE16_TO_CPU(data_local);
	return ICE_SUCCESS;
}

/**
 * ice_write_sr_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
 */
static enum ice_status
ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
		bool last_command)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_check_sr_access_params(hw, offset, words);
	if (!status)
		status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data,
					   last_command, 0, NULL);

	return status;
}

/**
 * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buf) from the Shadow RAM. The caller must hold
 * NVM ownership; ice_read_sr_buf below acquires and releases it around this
 * call.
 */
static enum ice_status
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
	u32 bytes = *words * 2, i;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM
	 * sector restrictions necessary when reading from the NVM.
	 */
	status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);

	/* Report the number of words successfully read */
	*words = bytes / 2;

	/* Byte swap the words up to the amount we actually read */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);

	return status;
}

/**
 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership.
 */
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->flash.blank_nvm_mode)
		return ICE_SUCCESS;

	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}

/**
 * ice_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM ownership.
 */
void ice_release_nvm(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->flash.blank_nvm_mode)
		return;

	ice_release_res(hw, ICE_NVM_RES_ID);
}
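
/*
 * Typical ownership pattern for the helpers above (an illustrative sketch
 * only; ice_read_sr_word further down follows this exact structure):
 *
 *	status = ice_acquire_nvm(hw, ICE_RES_READ);
 *	if (!status) {
 *		status = ice_read_sr_word_aq(hw, offset, &word);
 *		ice_release_nvm(hw);
 *	}
 */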

/**
 * ice_get_flash_bank_offset - Get offset into requested flash bank
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive flash bank
 * @module: the module to read from
 *
 * Based on the module, lookup the module offset from the beginning of the
 * flash.
 *
 * Returns the flash offset. Note that a value of zero is invalid and must be
 * treated as an error.
 */
static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select bank, u16 module)
{
	struct ice_bank_info *banks = &hw->flash.banks;
	enum ice_flash_bank active_bank;
	bool second_bank_active;
	u32 offset, size;

	switch (module) {
	case ICE_SR_1ST_NVM_BANK_PTR:
		offset = banks->nvm_ptr;
		size = banks->nvm_size;
		active_bank = banks->nvm_bank;
		break;
	case ICE_SR_1ST_OROM_BANK_PTR:
		offset = banks->orom_ptr;
		size = banks->orom_size;
		active_bank = banks->orom_bank;
		break;
	case ICE_SR_NETLIST_BANK_PTR:
		offset = banks->netlist_ptr;
		size = banks->netlist_size;
		active_bank = banks->netlist_bank;
		break;
	default:
		ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash module: 0x%04x\n", module);
		return 0;
	}

	switch (active_bank) {
	case ICE_1ST_FLASH_BANK:
		second_bank_active = false;
		break;
	case ICE_2ND_FLASH_BANK:
		second_bank_active = true;
		break;
	default:
		ice_debug(hw, ICE_DBG_NVM, "Unexpected value for active flash bank: %u\n",
			  active_bank);
		return 0;
	}

	/* The second flash bank is stored immediately following the first
	 * bank. Based on whether the 1st or 2nd bank is active, and whether
	 * we want the active or inactive bank, calculate the desired offset.
	 */
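	/* Offset selection summary:
	 *
	 *	active bank	requested bank	returned offset
	 *	1st		active		offset
	 *	1st		inactive	offset + size
	 *	2nd		active		offset + size
	 *	2nd		inactive	offset
	 */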
	switch (bank) {
	case ICE_ACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? size : 0);
	case ICE_INACTIVE_FLASH_BANK:
		return offset + (second_bank_active ? 0 : size);
	}

	ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash bank selection: %u\n", bank);
	return 0;
}

/**
 * ice_read_flash_module - Read data from one of the main NVM modules
 * @hw: pointer to the HW structure
 * @bank: which bank of the module to read
 * @module: the module to read
 * @offset: the offset into the module in bytes
 * @data: storage for the data read from the flash
 * @length: bytes of data to read
 *
 * Read data from the specified flash module. The bank parameter indicates
 * whether to read from the active bank or the inactive bank of that module.
 *
 * The data will be read using flat NVM access, and relies on the
 * hw->flash.banks data being setup by ice_determine_active_flash_banks()
 * during initialization.
 */
static enum ice_status
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
		      u32 offset, u8 *data, u32 length)
{
	enum ice_status status;
	u32 start;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	start = ice_get_flash_bank_offset(hw, bank, module);
	if (!start) {
		ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
			  module);
		return ICE_ERR_PARAM;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	status = ice_read_flat_nvm(hw, start + offset, &length, data, false);

	ice_release_nvm(hw);

	return status;
}

/**
 * ice_read_nvm_module - Read from the active main NVM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive NVM module
 * @offset: offset into the NVM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active NVM module. This includes the CSS
 * header at the start of the NVM module.
 */
static enum ice_status
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
	enum ice_status status;
	__le16 data_local;

	status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
				       (_FORCE_ u8 *)&data_local, sizeof(u16));
	if (!status)
		*data = LE16_TO_CPU(data_local);

	return status;
}

/**
 * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive NVM module
 * @offset: offset into the Shadow RAM copy to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the copy of the Shadow RAM found in the
 * specified NVM module.
 */
static enum ice_status
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
	return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
}

/**
 * ice_read_orom_module - Read from the active Option ROM module
 * @hw: pointer to the HW structure
 * @bank: whether to read from active or inactive OROM module
 * @offset: offset into the OROM module to read, in words
 * @data: storage for returned word value
 *
 * Read the specified word from the active Option ROM module of the flash.
 * Note that unlike the NVM module, the CSS data is stored at the end of the
 * module instead of at the beginning.
 */
static enum ice_status
ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
	enum ice_status status;
	__le16 data_local;

	status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16),
				       (_FORCE_ u8 *)&data_local, sizeof(u16));
	if (!status)
		*data = LE16_TO_CPU(data_local);

	return status;
}

/**
 * ice_read_netlist_module - Read data from the netlist module area
 * @hw: pointer to the HW structure
 * @bank: whether to read from the active or inactive module
 * @offset: offset into the netlist to read from
 * @data: storage for returned word value
 *
 * Read a word from the specified netlist bank.
 */
static enum ice_status
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
	enum ice_status status;
	__le16 data_local;

	status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
				       (_FORCE_ u8 *)&data_local, sizeof(u16));
	if (!status)
		*data = LE16_TO_CPU(data_local);

	return status;
}

/**
 * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_sr_word_aq.
 */
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_read_sr_word_aq(hw, offset, data);
		ice_release_nvm(hw);
	}

	return status;
}

/**
 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 */
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
		       u16 module_type)
{
	enum ice_status status;
	u16 pfa_len, pfa_ptr;
	u16 next_tlv;

	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
		return status;
	}
	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
		return status;
	}
	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
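	/* Each TLV is laid out as one type word, one length word, and then
	 * 'length' words of value, so the scan below advances by
	 * tlv_len + 2 words per entry.
	 */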
	while (next_tlv < pfa_ptr + pfa_len) {
		u16 tlv_sub_module_type;
		u16 tlv_len;

		/* Read TLV type */
		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
			break;
		}
		/* Read TLV length */
		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
			break;
		}
		if (tlv_sub_module_type == module_type) {
			if (tlv_len) {
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return ICE_SUCCESS;
			}
			return ICE_ERR_INVAL_SIZE;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length)
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}
	/* Module does not exist */
	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_read_pba_string - Reads part number string from NVM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the NVM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the NVM.
 */
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
	u16 pba_tlv, pba_tlv_len;
	enum ice_status status;
	u16 pba_word, pba_size;
	u16 i;

	status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
					ICE_SR_PBA_BLOCK_PTR);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
		return status;
	}

	/* pba_size is the next word */
	status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
		return status;
	}

	if (pba_tlv_len < pba_size) {
		ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
		return ICE_ERR_PARAM;
	}

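	/* Each PBA word stores two ASCII characters, high byte first, so the
	 * decoded string is pba_size * 2 characters plus a terminating NUL
	 * (which is what the buffer size check above accounts for).
	 */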
	for (i = 0; i < pba_size; i++) {
		status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * ice_get_nvm_srev - Read the security revision from the NVM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active NVM module
 * bank.
 */
static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
	enum ice_status status;
	u16 srev_l, srev_h;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l);
	if (status)
		return status;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_H, &srev_h);
	if (status)
		return status;

	*srev = srev_h << 16 | srev_l;

	return ICE_SUCCESS;
}

/**
 * ice_get_nvm_ver_info - Read NVM version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @nvm: pointer to NVM info structure
 *
 * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
 * in the nvm info structure.
 */
static enum ice_status
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
	u16 eetrack_lo, eetrack_hi, ver;
	enum ice_status status;

	status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n");
		return status;
	}

	nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	status = ice_get_nvm_srev(hw, bank, &nvm->srev);
	if (status)
		ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM security revision.\n");

	return ICE_SUCCESS;
}

/**
 * ice_get_inactive_nvm_ver - Read NVM version from the inactive bank
 * @hw: pointer to the HW structure
 * @nvm: storage for NVM version information
 *
 * Reads the NVM EETRACK ID, Map version, and security revision of the
 * inactive NVM bank. Used to access version data for a pending update that
 * has not yet been activated.
 */
enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
	return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}

/**
 * ice_get_orom_srev - Read the security revision from the OROM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from active or inactive flash module
 * @srev: storage for security revision
 *
 * Read the security revision out of the CSS header of the active OROM module
 * bank.
 */
static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
	enum ice_status status;
	u16 srev_l, srev_h;
	u32 css_start;

	if (hw->flash.banks.orom_size < ICE_NVM_OROM_TRAILER_LENGTH) {
		ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n",
			  hw->flash.banks.orom_size);
		return ICE_ERR_CFG;
	}

	/* calculate how far into the Option ROM the CSS header starts. Note
	 * that ice_read_orom_module takes a word offset so we need to
	 * divide by 2 here.
	 */
	css_start = (hw->flash.banks.orom_size - ICE_NVM_OROM_TRAILER_LENGTH) / 2;

	status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l);
	if (status)
		return status;

	status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_H, &srev_h);
	if (status)
		return status;

	*srev = srev_h << 16 | srev_l;

	return ICE_SUCCESS;
}

/**
 * ice_get_orom_civd_data - Get the combo version information from Option ROM
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash module
 * @civd: storage for the Option ROM CIVD data.
 *
 * Searches through the Option ROM flash contents to locate the CIVD data for
 * the image.
 */
static enum ice_status
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
		       struct ice_orom_civd_info *civd)
{
	struct ice_orom_civd_info tmp;
	enum ice_status status;
	u32 offset;

	/* The CIVD section is located in the Option ROM aligned to 512 bytes.
	 * The first 4 bytes must contain the ASCII characters "$CIV".
	 * A simple modulo 256 sum of all of the bytes of the structure must
	 * equal 0.
	 */
	for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
		u8 sum = 0, i;

		status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
					       offset, (u8 *)&tmp, sizeof(tmp));
		if (status) {
			ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
			return status;
		}

		/* Skip forward until we find a matching signature */
		if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
			continue;

		/* Verify that the simple checksum is zero */
		for (i = 0; i < sizeof(tmp); i++)
			sum += ((u8 *)&tmp)[i];

		if (sum) {
			ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
				  sum);
			return ICE_ERR_NVM;
		}

		*civd = tmp;
		return ICE_SUCCESS;
	}

	return ICE_ERR_NVM;
}

/**
 * ice_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash module
 * @orom: pointer to Option ROM info structure
 *
 * Read Option ROM version and security revision from the Option ROM flash
 * section.
 */
static enum ice_status
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
	struct ice_orom_civd_info civd;
	enum ice_status status;
	u32 combo_ver;

	status = ice_get_orom_civd_data(hw, bank, &civd);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to locate valid Option ROM CIVD data\n");
		return status;
	}

	combo_ver = LE32_TO_CPU(civd.combo_ver);

	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT);

	status = ice_get_orom_srev(hw, bank, &orom->srev);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read Option ROM security revision.\n");
		return status;
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_inactive_orom_ver - Read Option ROM version from the inactive bank
 * @hw: pointer to the HW structure
 * @orom: storage for Option ROM version information
 *
 * Reads the Option ROM version and security revision data for the inactive
 * section of flash. Used to access version data for a pending update that has
 * not yet been activated.
 */
enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
	return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}

/**
 * ice_get_netlist_info
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information from the requested bank. Reads the Link
 * Topology section to find the Netlist ID block and extract the relevant
 * information into the netlist version structure.
 */
static enum ice_status
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
		     struct ice_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	enum ice_status status;
	u16 *id_blk;

	status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
	if (status)
		return status;

	if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
		ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
			  ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
		return ICE_ERR_NVM;
	}

	status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
	if (status)
		return status;

	/* sanity check that we have at least enough words to store the netlist ID block */
	if (length < ICE_NETLIST_ID_BLK_SIZE) {
		ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
			  ICE_NETLIST_ID_BLK_SIZE, length);
		return ICE_ERR_NVM;
	}

	status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
	if (status)
		return status;
	node_count &= ICE_LINK_TOPO_NODE_COUNT_M;

	id_blk = (u16 *)ice_calloc(hw, ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk));
	if (!id_blk)
		return ICE_ERR_NO_MEMORY;

	/* Read out the entire Netlist ID Block at once. */
	status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
				       ICE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
				       (u8 *)id_blk, ICE_NETLIST_ID_BLK_SIZE * sizeof(u16));
	if (status)
		goto exit_error;

	for (i = 0; i < ICE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = LE16_TO_CPU(((_FORCE_ __le16 *)id_blk)[i]);
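
	/* Each 32-bit field of the version information is split across two
	 * 16-bit words in the ID block; reassemble them below.
	 */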
	netlist->major = id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[ICE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[ICE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[ICE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[ICE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

exit_error:
	ice_free(hw, id_blk);

	return status;
}

/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information
 */
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
	return ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, netlist);
}

/**
 * ice_get_inactive_netlist_ver
 * @hw: pointer to the HW struct
 * @netlist: pointer to netlist version info structure
 *
 * Read the netlist version data from the inactive netlist bank. Used to
 * extract version data of a pending flash update in order to display the
 * version data.
 */
enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
	return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}

/**
 * ice_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

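	/* Bisect the flash: any offset that reads back successfully raises
	 * the lower bound, and any offset rejected with EINVAL lowers the
	 * upper bound, until the two bounds are adjacent. The final upper
	 * bound is the first inaccessible byte, i.e. the flash size.
	 */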
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = ICE_SUCCESS;
			max_size = offset;
		} else if (!status) {
			ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);

	hw->flash.flash_size = max_size;

err_read_flat_nvm:
	ice_release_nvm(hw);

	return status;
}

/**
 * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word
 * @hw: pointer to the HW structure
 * @offset: the word offset of the Shadow RAM word to read
 * @pointer: pointer value read from Shadow RAM
 *
 * Read the given Shadow RAM word, and convert it to a pointer value specified
 * in bytes. This function assumes the specified offset is a valid pointer
 * word.
 *
 * Each pointer word specifies whether it is stored in word size or 4KB
 * sector size by using the highest bit. The reported pointer value will be in
 * bytes, intended for flat NVM reads.
 */
static enum ice_status
ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
	enum ice_status status;
	u16 value;

	status = ice_read_sr_word(hw, offset, &value);
	if (status)
		return status;

	/* Determine if the pointer is in 4KB or word units */
	if (value & ICE_SR_NVM_PTR_4KB_UNITS)
		*pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
	else
		*pointer = value * 2;

	return ICE_SUCCESS;
}

/**
 * ice_read_sr_area_size - Read an area size from a Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: the word offset of the Shadow RAM to read
 * @size: size value read from the Shadow RAM
 *
 * Read the given Shadow RAM word, and convert it to an area size value
 * specified in bytes. This function assumes the specified offset is a valid
 * area size word.
 *
 * Each area size word is specified in 4KB sector units. This function reports
 * the size in bytes, intended for flat NVM reads.
 */
static enum ice_status
ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
	enum ice_status status;
	u16 value;

	status = ice_read_sr_word(hw, offset, &value);
	if (status)
		return status;

	/* Area sizes are always specified in 4KB units */
	*size = value * 4 * 1024;

	return ICE_SUCCESS;
}

/**
 * ice_determine_active_flash_banks - Discover active bank for each module
 * @hw: pointer to the HW struct
 *
 * Read the Shadow RAM control word and determine which banks are active for
 * the NVM, OROM, and Netlist modules. Also read and calculate the associated
 * pointer and size. These values are then cached into the ice_flash_info
 * structure for later use in order to calculate the correct offset to read
 * from the active module.
 */
static enum ice_status
ice_determine_active_flash_banks(struct ice_hw *hw)
{
	struct ice_bank_info *banks = &hw->flash.banks;
	enum ice_status status;
	u16 ctrl_word;

	status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n");
		return status;
	}

	/* Check that the control word indicates validity */
	if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
		ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
		return ICE_ERR_CFG;
	}

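	/* Each bank-select bit in the control word picks the bank for its
	 * module: a clear bit selects the first flash bank, a set bit selects
	 * the second.
	 */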
	if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
		banks->nvm_bank = ICE_1ST_FLASH_BANK;
	else
		banks->nvm_bank = ICE_2ND_FLASH_BANK;

	if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK))
		banks->orom_bank = ICE_1ST_FLASH_BANK;
	else
		banks->orom_bank = ICE_2ND_FLASH_BANK;

	if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK))
		banks->netlist_bank = ICE_1ST_FLASH_BANK;
	else
		banks->netlist_bank = ICE_2ND_FLASH_BANK;

	status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n");
		return status;
	}

	status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n");
		return status;
	}

	status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n");
		return status;
	}

	status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n");
		return status;
	}

	status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n");
		return status;
	}

	status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n");
		return status;
	}

	return ICE_SUCCESS;
}

/**
 * ice_init_nvm - initializes NVM settings
 * @hw: pointer to the HW struct
 *
 * Reads and populates NVM settings such as the Shadow RAM size and
 * blank_nvm_mode, discovers the flash size, determines the active flash
 * banks, and reads the NVM, Option ROM, and netlist version information.
 */
enum ice_status
ice_init_nvm(struct ice_hw *hw)
{
	struct ice_flash_info *flash = &hw->flash;
	enum ice_status status;
	u32 fla, gens_stat;
	u8 sr_size;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* Switching to words (sr_size contains power of 2) */
	flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
		flash->blank_nvm_mode = false;
	} else {
		/* Blank programming mode */
		flash->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
		return status;
	}

	status = ice_determine_active_flash_banks(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n");
		return status;
	}

	status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
		return status;
	}

	status = ice_get_orom_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->orom);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");

	/* read the netlist version information */
	status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return ICE_SUCCESS;
}

/**
 * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buf) from the SR using the ice_read_sr_buf_aq
 * method. The buf read is preceded by the NVM ownership take
 * and followed by the release.
 */
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_read_sr_buf_aq(hw, offset, words, data);
		ice_release_nvm(hw);
	}

	return status;
}

/**
 * __ice_write_sr_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the ice_write_sr_aq method.
 * NVM ownership has to be acquired and released (on ARQ completion event
 * reception) by the caller. To commit the SR to the NVM, the update checksum
 * function should be called.
 */
enum ice_status
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
{
	__le16 data_local = CPU_TO_LE16(*data);

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Value 0x00 below means that we treat SR as a flat mem */
	return ice_write_sr_aq(hw, offset, 1, &data_local, false);
}

/**
 * __ice_write_sr_buf - Writes Shadow RAM buf
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller. To commit the SR to the
 * NVM, the update checksum function should be called.
 */
enum ice_status
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
{
	enum ice_status status;
	__le16 *data_local;
	void *vmem;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	vmem = ice_calloc(hw, words, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data_local = (_FORCE_ __le16 *)vmem;

	for (i = 0; i < words; i++)
		data_local[i] = CPU_TO_LE16(data[i]);

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	status = ice_write_sr_aq(hw, offset, words, data_local, false);

	ice_free(hw, vmem);

	return status;
}

/**
 * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD area are customer specific and unknown, so this function skips
 * the maximum possible VPD size (1kB).
 */
static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
	enum ice_status status = ICE_SUCCESS;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module;
	void *vmem;
	u16 *data;
	u16 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data = (u16 *)vmem;

	/* read pointer to VPD area */
	status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* read pointer to PCIe Alt Auto-load module */
	status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR,
				     &pcie_alt_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->flash.sr_words; i++) {
		/* Read SR page */
		if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;

			status = ice_read_sr_buf_aq(hw, i, &words, data);
			if (status != ICE_SUCCESS)
				goto ice_calc_sr_checksum_exit;
		}

		/* Skip Checksum word */
		if (i == ICE_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if (i >= (u32)vpd_module &&
		    i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS))
			continue;
		/* Skip PCIe ALT module (convert byte size to word count) */
		if (i >= (u32)pcie_alt_module &&
		    i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS))
			continue;

		checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
	}

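	/* The stored checksum is defined so that the sum of all covered words
	 * plus the checksum word equals ICE_SR_SW_CHECKSUM_BASE (modulo
	 * 2^16), hence base minus running sum below.
	 */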
	*checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local;

ice_calc_sr_checksum_exit:
	ice_free(hw, vmem);
	return status;
}

/**
 * ice_update_sr_checksum - Updates the Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 */
enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
{
	enum ice_status status;
	__le16 le_sum;
	u16 checksum;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_calc_sr_checksum(hw, &checksum);
	if (!status) {
		le_sum = CPU_TO_LE16(checksum);
		status = ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1,
					 &le_sum, true);
	}
	return status;
}

/**
 * ice_validate_sr_checksum - Validate Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the Shadow RAM SW checksum.
 * If the caller does not need checksum, the value can be NULL.
 */
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
	enum ice_status status;
	u16 checksum_local;
	u16 checksum_sr;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_calc_sr_checksum(hw, &checksum_local);
		ice_release_nvm(hw);
		if (status)
			return status;
	} else {
		return status;
	}

	ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		status = ICE_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return status;
}

/**
 * ice_nvm_validate_checksum
 * @hw: pointer to the HW struct
 *
 * Verify NVM PFA checksum validity (0x0706)
 */
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	cmd = &desc.params.nvm_checksum;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	ice_release_nvm(hw);

	if (!status)
		if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
			status = ICE_ERR_NVM_CHECKSUM;

	return status;
}

/**
 * ice_nvm_recalculate_checksum
 * @hw: pointer to the HW struct
 *
 * Recalculate NVM PFA checksum (0x0706)
 */
enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	cmd = &desc.params.nvm_checksum;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_RECALC;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	ice_release_nvm(hw);

	return status;
}

/**
 * ice_nvm_write_activate
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM activate admin command bits (banks to be validated)
 *
 * Update the control word with the required banks' validity bits and dump
 * the Shadow RAM to flash (0x0707)
 */
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
{
	struct ice_aqc_nvm *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.nvm;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);

	cmd->cmd_flags = cmd_flags;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_get_nvm_minsrevs - Get the Minimum Security Revision values from flash
 * @hw: pointer to the HW struct
 * @minsrevs: structure to store NVM and OROM minsrev values
 *
 * Read the Minimum Security Revision TLV and extract the revision values from
 * the flash image into a readable structure for processing.
 */
enum ice_status
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
	struct ice_aqc_nvm_minsrev data;
	enum ice_status status;
	u16 valid;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	status = ice_aq_read_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data),
				 &data, true, false, NULL);

	ice_release_nvm(hw);

	if (status)
		return status;

	valid = LE16_TO_CPU(data.validity);

	/* Extract NVM minimum security revision */
	if (valid & ICE_AQC_NVM_MINSREV_NVM_VALID) {
		u16 minsrev_l, minsrev_h;

		minsrev_l = LE16_TO_CPU(data.nvm_minsrev_l);
		minsrev_h = LE16_TO_CPU(data.nvm_minsrev_h);

		minsrevs->nvm = minsrev_h << 16 | minsrev_l;
		minsrevs->nvm_valid = true;
	}

	/* Extract the OROM minimum security revision */
	if (valid & ICE_AQC_NVM_MINSREV_OROM_VALID) {
		u16 minsrev_l, minsrev_h;

		minsrev_l = LE16_TO_CPU(data.orom_minsrev_l);
		minsrev_h = LE16_TO_CPU(data.orom_minsrev_h);

		minsrevs->orom = minsrev_h << 16 | minsrev_l;
		minsrevs->orom_valid = true;
	}

	return ICE_SUCCESS;
}
/**
 * ice_update_nvm_minsrevs - Update minimum security revision TLV data in flash
 * @hw: pointer to the HW struct
 * @minsrevs: minimum security revision information
 *
 * Update the NVM or Option ROM minimum security revision fields in the PFA
 * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
 * fields to determine what update is being requested. If the valid bit is not
 * set for that module, then the associated minsrev will be left as is.
 */
enum ice_status
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
	struct ice_aqc_nvm_minsrev data;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
		ice_debug(hw, ICE_DBG_NVM, "At least one of NVM and OROM MinSrev must be valid");
		return ICE_ERR_PARAM;
	}

	status = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (status)
		return status;

	/* Get current data */
	status = ice_aq_read_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data),
				 &data, true, false, NULL);
	if (status)
		goto exit_release_res;

	if (minsrevs->nvm_valid) {
		data.nvm_minsrev_l = CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
		data.nvm_minsrev_h = CPU_TO_LE16(minsrevs->nvm >> 16);
		data.validity |= CPU_TO_LE16(ICE_AQC_NVM_MINSREV_NVM_VALID);
	}

	if (minsrevs->orom_valid) {
		data.orom_minsrev_l = CPU_TO_LE16(minsrevs->orom & 0xFFFF);
		data.orom_minsrev_h = CPU_TO_LE16(minsrevs->orom >> 16);
		data.validity |= CPU_TO_LE16(ICE_AQC_NVM_MINSREV_OROM_VALID);
	}

	/* Update flash data */
	status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data,
				   true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
	if (status)
		goto exit_release_res;

	/* Dump the Shadow RAM to the flash */
	status = ice_nvm_write_activate(hw, 0);

exit_release_res:
	ice_release_nvm(hw);

	return status;
}
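/*
 * Illustrative sketch only, not part of the driver: raising only the NVM
 * minimum security revision while leaving the OROM value untouched. The
 * new value ("new_srev") is an assumed input from the caller; setting just
 * nvm_valid makes ice_update_nvm_minsrevs() above skip the OROM fields, as
 * its description states.
 *
 *	struct ice_minsrev_info minsrevs = { 0 };
 *	enum ice_status status;
 *
 *	minsrevs.nvm = new_srev;
 *	minsrevs.nvm_valid = true;
 *	status = ice_update_nvm_minsrevs(hw, &minsrevs);
 */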
/**
 * ice_nvm_access_get_features - Return the NVM access features structure
 * @cmd: NVM access command to process
 * @data: storage for the driver NVM features
 *
 * Fill in the data section of the NVM access request with a copy of the NVM
 * features structure.
 */
enum ice_status
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
			    union ice_nvm_access_data *data)
{
	/* The provided data_size must be at least as large as our NVM
	 * features structure. A larger size should not be treated as an
	 * error, to allow future extensions to the features structure to
	 * work on older drivers.
	 */
	if (cmd->data_size < sizeof(struct ice_nvm_features))
		return ICE_ERR_NO_MEMORY;

	/* Initialize the data buffer to zeros */
	ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);

	/* Fill in the features data */
	data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER;
	data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER;
	data->drv_features.size = sizeof(struct ice_nvm_features);
	data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;

	return ICE_SUCCESS;
}

/**
 * ice_nvm_access_get_module - Helper function to read module value
 * @cmd: NVM access command structure
 *
 * Reads the module value out of the NVM access config field.
 */
u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd)
{
	return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S);
}

/**
 * ice_nvm_access_get_flags - Helper function to read flags value
 * @cmd: NVM access command structure
 *
 * Reads the flags value out of the NVM access config field.
 */
u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd)
{
	return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S);
}

/**
 * ice_nvm_access_get_adapter - Helper function to read adapter info
 * @cmd: NVM access command structure
 *
 * Read the adapter info value out of the NVM access config field.
 */
u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
{
	return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >>
		ICE_NVM_CFG_ADAPTER_INFO_S);
}

/**
 * ice_validate_nvm_rw_reg - Check that an NVM access request is valid
 * @cmd: NVM access command structure
 *
 * Validates that an NVM access structure is a request to read or write a
 * valid register offset. First validates that the module and flags are
 * correct, and then ensures that the register offset is one of the accepted
 * registers.
 */
static enum ice_status
ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
{
	u32 module, flags, offset;
	u16 i;

	module = ice_nvm_access_get_module(cmd);
	flags = ice_nvm_access_get_flags(cmd);
	offset = cmd->offset;

	/* Make sure the module and flags indicate a read/write request */
	if (module != ICE_NVM_REG_RW_MODULE ||
	    flags != ICE_NVM_REG_RW_FLAGS ||
	    cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval))
		return ICE_ERR_PARAM;

	switch (offset) {
	case GL_HICR:
	case GL_HICR_EN: /* Note, this register is read only */
	case GL_FWSTS:
	case GL_MNG_FWSM:
	case GLGEN_CSR_DEBUG_C:
	case GLGEN_RSTAT:
	case GLPCI_LBARCTRL:
	case GLNVM_GENS:
	case GLNVM_FLA:
	case PF_FUNC_RID:
		return ICE_SUCCESS;
	default:
		break;
	}

	for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++)
		if (offset == (u32)GL_HIDA(i))
			return ICE_SUCCESS;

	for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++)
		if (offset == (u32)GL_HIBA(i))
			return ICE_SUCCESS;

	/* All other register offsets are not valid */
	return ICE_ERR_OUT_OF_RANGE;
}

/**
 * ice_nvm_access_read - Handle an NVM read request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command to process
 * @data: storage for the register value read
 *
 * Process an NVM access request to read a register.
 */
enum ice_status
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
		    union ice_nvm_access_data *data)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Always initialize the output data, even on failure */
	ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);

	/* Make sure this is a valid read/write access request */
	status = ice_validate_nvm_rw_reg(cmd);
	if (status)
		return status;

	ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n",
		  cmd->offset);

	/* Read the register and store the contents in the data field */
	data->regval = rd32(hw, cmd->offset);

	return ICE_SUCCESS;
}
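/*
 * Illustrative sketch only, not part of the driver: a register-read request
 * that ice_validate_nvm_rw_reg() above will accept. The config word is
 * packed with the same shift macros the getter helpers above use to unpack
 * it; how a real request reaches the driver (ioctl plumbing, copying the
 * buffers, the adapter-info check done by ice_handle_nvm_access() later in
 * this file) is assumed and not shown.
 *
 *	struct ice_nvm_access_cmd cmd = { 0 };
 *	union ice_nvm_access_data data;
 *	enum ice_status status;
 *
 *	cmd.command = ICE_NVM_CMD_READ;
 *	cmd.config = (ICE_NVM_REG_RW_MODULE << ICE_NVM_CFG_MODULE_S) |
 *		     (ICE_NVM_REG_RW_FLAGS << ICE_NVM_CFG_FLAGS_S);
 *	cmd.offset = GLGEN_RSTAT;
 *	cmd.data_size = sizeof(data.regval);
 *
 *	status = ice_nvm_access_read(hw, &cmd, &data);
 */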
/**
 * ice_nvm_access_write - Handle an NVM write request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command to process
 * @data: NVM access data to write
 *
 * Process an NVM access request to write a register.
 */
enum ice_status
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
		     union ice_nvm_access_data *data)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Make sure this is a valid read/write access request */
	status = ice_validate_nvm_rw_reg(cmd);
	if (status)
		return status;

	/* Reject requests to write to read-only registers */
	switch (cmd->offset) {
	case GL_HICR_EN:
	case GLGEN_RSTAT:
		return ICE_ERR_OUT_OF_RANGE;
	default:
		break;
	}

	ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
		  cmd->offset, data->regval);

	/* Write the data field to the specified register */
	wr32(hw, cmd->offset, data->regval);

	return ICE_SUCCESS;
}

/**
 * ice_handle_nvm_access - Handle an NVM access request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command info
 * @data: pointer to read or return data
 *
 * Process an NVM access request. Read the command structure information and
 * determine if it is valid. If not, report an error indicating the command
 * was invalid.
 *
 * For valid commands, perform the necessary function, copying the data into
 * the provided data buffer.
 */
enum ice_status
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
		      union ice_nvm_access_data *data)
{
	u32 module, flags, adapter_info;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Extended flags are currently reserved and must be zero */
	if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0)
		return ICE_ERR_PARAM;

	/* Adapter info must match the HW device ID */
	adapter_info = ice_nvm_access_get_adapter(cmd);
	if (adapter_info != hw->device_id)
		return ICE_ERR_PARAM;

	switch (cmd->command) {
	case ICE_NVM_CMD_READ:
		module = ice_nvm_access_get_module(cmd);
		flags = ice_nvm_access_get_flags(cmd);

		/* Getting the driver's NVM features structure shares the same
		 * command type as reading a register. Read the config field
		 * to determine if this is a request to get features.
		 */
		if (module == ICE_NVM_GET_FEATURES_MODULE &&
		    flags == ICE_NVM_GET_FEATURES_FLAGS &&
		    cmd->offset == 0)
			return ice_nvm_access_get_features(cmd, data);
		else
			return ice_nvm_access_read(hw, cmd, data);
	case ICE_NVM_CMD_WRITE:
		return ice_nvm_access_write(hw, cmd, data);
	default:
		return ICE_ERR_PARAM;
	}
}
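/*
 * Illustrative sketch only, not part of the driver: the write path above
 * rejects read-only registers even when the request is otherwise well
 * formed, so writing GLGEN_RSTAT through the dispatcher fails with
 * ICE_ERR_OUT_OF_RANGE. The config packing mirrors the getter helpers
 * earlier in this file; the "hw" pointer and the user-space plumbing that
 * would normally deliver the request are assumed and not shown.
 *
 *	struct ice_nvm_access_cmd cmd = { 0 };
 *	union ice_nvm_access_data data;
 *	enum ice_status status;
 *
 *	cmd.command = ICE_NVM_CMD_WRITE;
 *	cmd.config = (ICE_NVM_REG_RW_MODULE << ICE_NVM_CFG_MODULE_S) |
 *		     (ICE_NVM_REG_RW_FLAGS << ICE_NVM_CFG_FLAGS_S) |
 *		     ((u32)hw->device_id << ICE_NVM_CFG_ADAPTER_INFO_S);
 *	cmd.offset = GLGEN_RSTAT;
 *	cmd.data_size = sizeof(data.regval);
 *	data.regval = 0;
 *
 *	status = ice_handle_nvm_access(hw, &cmd, &data);
 *	status is ICE_ERR_OUT_OF_RANGE here, since GLGEN_RSTAT is read-only.
 */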