1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2020, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31 /*$FreeBSD$*/ 32 33 #include "ice_common.h" 34 35 /** 36 * ice_aq_read_nvm 37 * @hw: pointer to the HW struct 38 * @module_typeid: module pointer location in words from the NVM beginning 39 * @offset: byte offset from the module beginning 40 * @length: length of the section to be read (in bytes from the offset) 41 * @data: command buffer (size [bytes] = length) 42 * @last_command: tells if this is the last command in a series 43 * @read_shadow_ram: tell if this is a shadow RAM read 44 * @cd: pointer to command details structure or NULL 45 * 46 * Read the NVM using the admin queue commands (0x0701) 47 */ 48 enum ice_status 49 ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, 50 void *data, bool last_command, bool read_shadow_ram, 51 struct ice_sq_cd *cd) 52 { 53 struct ice_aq_desc desc; 54 struct ice_aqc_nvm *cmd; 55 56 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 57 58 cmd = &desc.params.nvm; 59 60 if (offset > ICE_AQC_NVM_MAX_OFFSET) 61 return ICE_ERR_PARAM; 62 63 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); 64 65 if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT) 66 cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY; 67 68 /* If this is the last command in a series, set the proper flag. 
*/ 69 if (last_command) 70 cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; 71 cmd->module_typeid = CPU_TO_LE16(module_typeid); 72 cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF); 73 cmd->offset_high = (offset >> 16) & 0xFF; 74 cmd->length = CPU_TO_LE16(length); 75 76 return ice_aq_send_cmd(hw, &desc, data, length, cd); 77 } 78 79 /** 80 * ice_read_flat_nvm - Read portion of NVM by flat offset 81 * @hw: pointer to the HW struct 82 * @offset: offset from beginning of NVM 83 * @length: (in) number of bytes to read; (out) number of bytes actually read 84 * @data: buffer to return data in (sized to fit the specified length) 85 * @read_shadow_ram: if true, read from shadow RAM instead of NVM 86 * 87 * Reads a portion of the NVM, as a flat memory space. This function correctly 88 * breaks read requests across Shadow RAM sectors and ensures that no single 89 * read request exceeds the maximum 4KB read for a single AdminQ command. 90 * 91 * Returns a status code on failure. Note that the data pointer may be 92 * partially updated if some reads succeed before a failure. 93 */ 94 enum ice_status 95 ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, 96 bool read_shadow_ram) 97 { 98 enum ice_status status; 99 u32 inlen = *length; 100 u32 bytes_read = 0; 101 bool last_cmd; 102 103 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 104 105 *length = 0; 106 107 /* Verify the length of the read if this is for the Shadow RAM */ 108 if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) { 109 ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n"); 110 return ICE_ERR_PARAM; 111 } 112 113 do { 114 u32 read_size, sector_offset; 115 116 /* ice_aq_read_nvm cannot read more than 4KB at a time. 117 * Additionally, a read from the Shadow RAM may not cross over 118 * a sector boundary. Conveniently, the sector size is also 119 * 4KB. 
120 */ 121 sector_offset = offset % ICE_AQ_MAX_BUF_LEN; 122 read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, 123 inlen - bytes_read); 124 125 last_cmd = !(bytes_read + read_size < inlen); 126 127 /* ice_aq_read_nvm takes the length as a u16. Our read_size is 128 * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum 129 * size guarantees that it will fit within the 2 bytes. 130 */ 131 status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, 132 offset, (u16)read_size, 133 data + bytes_read, last_cmd, 134 read_shadow_ram, NULL); 135 if (status) 136 break; 137 138 bytes_read += read_size; 139 offset += read_size; 140 } while (!last_cmd); 141 142 *length = bytes_read; 143 return status; 144 } 145 146 /** 147 * ice_aq_update_nvm 148 * @hw: pointer to the HW struct 149 * @module_typeid: module pointer location in words from the NVM beginning 150 * @offset: byte offset from the module beginning 151 * @length: length of the section to be written (in bytes from the offset) 152 * @data: command buffer (size [bytes] = length) 153 * @last_command: tells if this is the last command in a series 154 * @command_flags: command parameters 155 * @cd: pointer to command details structure or NULL 156 * 157 * Update the NVM using the admin queue commands (0x0703) 158 */ 159 enum ice_status 160 ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, 161 u16 length, void *data, bool last_command, u8 command_flags, 162 struct ice_sq_cd *cd) 163 { 164 struct ice_aq_desc desc; 165 struct ice_aqc_nvm *cmd; 166 167 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 168 169 cmd = &desc.params.nvm; 170 171 /* In offset the highest byte must be zeroed. */ 172 if (offset & 0xFF000000) 173 return ICE_ERR_PARAM; 174 175 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); 176 177 cmd->cmd_flags |= command_flags; 178 179 /* If this is the last command in a series, set the proper flag. 
*/ 180 if (last_command) 181 cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; 182 cmd->module_typeid = CPU_TO_LE16(module_typeid); 183 cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF); 184 cmd->offset_high = (offset >> 16) & 0xFF; 185 cmd->length = CPU_TO_LE16(length); 186 187 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 188 189 return ice_aq_send_cmd(hw, &desc, data, length, cd); 190 } 191 192 /** 193 * ice_aq_erase_nvm 194 * @hw: pointer to the HW struct 195 * @module_typeid: module pointer location in words from the NVM beginning 196 * @cd: pointer to command details structure or NULL 197 * 198 * Erase the NVM sector using the admin queue commands (0x0702) 199 */ 200 enum ice_status 201 ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) 202 { 203 struct ice_aq_desc desc; 204 struct ice_aqc_nvm *cmd; 205 206 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 207 208 cmd = &desc.params.nvm; 209 210 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase); 211 212 cmd->module_typeid = CPU_TO_LE16(module_typeid); 213 cmd->length = CPU_TO_LE16(ICE_AQC_NVM_ERASE_LEN); 214 cmd->offset_low = 0; 215 cmd->offset_high = 0; 216 217 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 218 } 219 220 /** 221 * ice_aq_read_nvm_cfg - read an NVM config block 222 * @hw: pointer to the HW struct 223 * @cmd_flags: NVM access admin command bits 224 * @field_id: field or feature ID 225 * @data: buffer for result 226 * @buf_size: buffer size 227 * @elem_count: pointer to count of elements read by FW 228 * @cd: pointer to command details structure or NULL 229 * 230 * Reads single or multiple feature/field ID and data (0x0704) 231 */ 232 enum ice_status 233 ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data, 234 u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd) 235 { 236 struct ice_aqc_nvm_cfg *cmd; 237 struct ice_aq_desc desc; 238 enum ice_status status; 239 240 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 241 242 cmd = &desc.params.nvm_cfg; 243 
244 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read); 245 246 cmd->cmd_flags = cmd_flags; 247 cmd->id = CPU_TO_LE16(field_id); 248 249 status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd); 250 if (!status && elem_count) 251 *elem_count = LE16_TO_CPU(cmd->count); 252 253 return status; 254 } 255 256 /** 257 * ice_aq_write_nvm_cfg - write an NVM config block 258 * @hw: pointer to the HW struct 259 * @cmd_flags: NVM access admin command bits 260 * @data: buffer for result 261 * @buf_size: buffer size 262 * @elem_count: count of elements to be written 263 * @cd: pointer to command details structure or NULL 264 * 265 * Writes single or multiple feature/field ID and data (0x0705) 266 */ 267 enum ice_status 268 ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size, 269 u16 elem_count, struct ice_sq_cd *cd) 270 { 271 struct ice_aqc_nvm_cfg *cmd; 272 struct ice_aq_desc desc; 273 274 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 275 276 cmd = &desc.params.nvm_cfg; 277 278 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write); 279 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 280 281 cmd->count = CPU_TO_LE16(elem_count); 282 cmd->cmd_flags = cmd_flags; 283 284 return ice_aq_send_cmd(hw, &desc, data, buf_size, cd); 285 } 286 287 /** 288 * ice_check_sr_access_params - verify params for Shadow RAM R/W operations. 
289 * @hw: pointer to the HW structure 290 * @offset: offset in words from module start 291 * @words: number of words to access 292 */ 293 static enum ice_status 294 ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words) 295 { 296 if ((offset + words) > hw->nvm.sr_words) { 297 ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond SR lmt.\n"); 298 return ICE_ERR_PARAM; 299 } 300 301 if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) { 302 /* We can access only up to 4KB (one sector), in one AQ write */ 303 ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n", 304 words, ICE_SR_SECTOR_SIZE_IN_WORDS); 305 return ICE_ERR_PARAM; 306 } 307 308 if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) != 309 (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) { 310 /* A single access cannot spread over two sectors */ 311 ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n"); 312 return ICE_ERR_PARAM; 313 } 314 315 return ICE_SUCCESS; 316 } 317 318 /** 319 * ice_read_sr_word_aq - Reads Shadow RAM via AQ 320 * @hw: pointer to the HW structure 321 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 322 * @data: word read from the Shadow RAM 323 * 324 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. 325 */ 326 enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) 327 { 328 u32 bytes = sizeof(u16); 329 enum ice_status status; 330 __le16 data_local; 331 332 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 333 334 /* Note that ice_read_flat_nvm checks if the read is past the Shadow 335 * RAM size, and ensures we don't read across a Shadow RAM sector 336 * boundary 337 */ 338 status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, 339 (_FORCE_ u8 *)&data_local, true); 340 if (status) 341 return status; 342 343 *data = LE16_TO_CPU(data_local); 344 return ICE_SUCCESS; 345 } 346 347 /** 348 * ice_write_sr_aq - Writes Shadow RAM. 
349 * @hw: pointer to the HW structure 350 * @offset: offset in words from module start 351 * @words: number of words to write 352 * @data: buffer with words to write to the Shadow RAM 353 * @last_command: tells the AdminQ that this is the last command 354 * 355 * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 356 */ 357 static enum ice_status 358 ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data, 359 bool last_command) 360 { 361 enum ice_status status; 362 363 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 364 365 status = ice_check_sr_access_params(hw, offset, words); 366 if (!status) 367 status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data, 368 last_command, 0, NULL); 369 370 return status; 371 } 372 373 /** 374 * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ 375 * @hw: pointer to the HW structure 376 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 377 * @words: (in) number of words to read; (out) number of words actually read 378 * @data: words read from the Shadow RAM 379 * 380 * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is 381 * taken before reading the buffer and later released. 382 */ 383 static enum ice_status 384 ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) 385 { 386 u32 bytes = *words * 2, i; 387 enum ice_status status; 388 389 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 390 391 /* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM 392 * sector restrictions necessary when reading from the NVM. 
393 */ 394 status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); 395 396 /* Report the number of words successfully read */ 397 *words = bytes / 2; 398 399 /* Byte swap the words up to the amount we actually read */ 400 for (i = 0; i < *words; i++) 401 data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]); 402 403 return status; 404 } 405 406 /** 407 * ice_acquire_nvm - Generic request for acquiring the NVM ownership 408 * @hw: pointer to the HW structure 409 * @access: NVM access type (read or write) 410 * 411 * This function will request NVM ownership. 412 */ 413 enum ice_status 414 ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) 415 { 416 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 417 418 if (hw->nvm.blank_nvm_mode) 419 return ICE_SUCCESS; 420 421 return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); 422 } 423 424 /** 425 * ice_release_nvm - Generic request for releasing the NVM ownership 426 * @hw: pointer to the HW structure 427 * 428 * This function will release NVM ownership. 429 */ 430 void ice_release_nvm(struct ice_hw *hw) 431 { 432 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 433 434 if (hw->nvm.blank_nvm_mode) 435 return; 436 437 ice_release_res(hw, ICE_NVM_RES_ID); 438 } 439 440 /** 441 * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary 442 * @hw: pointer to the HW structure 443 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 444 * @data: word read from the Shadow RAM 445 * 446 * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. 
447 */ 448 enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) 449 { 450 enum ice_status status; 451 452 status = ice_acquire_nvm(hw, ICE_RES_READ); 453 if (!status) { 454 status = ice_read_sr_word_aq(hw, offset, data); 455 ice_release_nvm(hw); 456 } 457 458 return status; 459 } 460 461 /** 462 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA 463 * @hw: pointer to hardware structure 464 * @module_tlv: pointer to module TLV to return 465 * @module_tlv_len: pointer to module TLV length to return 466 * @module_type: module type requested 467 * 468 * Finds the requested sub module TLV type from the Preserved Field 469 * Area (PFA) and returns the TLV pointer and length. The caller can 470 * use these to read the variable length TLV value. 471 */ 472 enum ice_status 473 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, 474 u16 module_type) 475 { 476 enum ice_status status; 477 u16 pfa_len, pfa_ptr; 478 u16 next_tlv; 479 480 status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); 481 if (status != ICE_SUCCESS) { 482 ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); 483 return status; 484 } 485 status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); 486 if (status != ICE_SUCCESS) { 487 ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); 488 return status; 489 } 490 /* Starting with first TLV after PFA length, iterate through the list 491 * of TLVs to find the requested one. 
492 */ 493 next_tlv = pfa_ptr + 1; 494 while (next_tlv < pfa_ptr + pfa_len) { 495 u16 tlv_sub_module_type; 496 u16 tlv_len; 497 498 /* Read TLV type */ 499 status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); 500 if (status != ICE_SUCCESS) { 501 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); 502 break; 503 } 504 /* Read TLV length */ 505 status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); 506 if (status != ICE_SUCCESS) { 507 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); 508 break; 509 } 510 if (tlv_sub_module_type == module_type) { 511 if (tlv_len) { 512 *module_tlv = next_tlv; 513 *module_tlv_len = tlv_len; 514 return ICE_SUCCESS; 515 } 516 return ICE_ERR_INVAL_SIZE; 517 } 518 /* Check next TLV, i.e. current TLV pointer + length + 2 words 519 * (for current TLV's type and length) 520 */ 521 next_tlv = next_tlv + tlv_len + 2; 522 } 523 /* Module does not exist */ 524 return ICE_ERR_DOES_NOT_EXIST; 525 } 526 527 /** 528 * ice_read_pba_string - Reads part number string from NVM 529 * @hw: pointer to hardware structure 530 * @pba_num: stores the part number string from the NVM 531 * @pba_num_size: part number string buffer length 532 * 533 * Reads the part number string from the NVM. 
534 */ 535 enum ice_status 536 ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) 537 { 538 u16 pba_tlv, pba_tlv_len; 539 enum ice_status status; 540 u16 pba_word, pba_size; 541 u16 i; 542 543 status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, 544 ICE_SR_PBA_BLOCK_PTR); 545 if (status != ICE_SUCCESS) { 546 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n"); 547 return status; 548 } 549 550 /* pba_size is the next word */ 551 status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size); 552 if (status != ICE_SUCCESS) { 553 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n"); 554 return status; 555 } 556 557 if (pba_tlv_len < pba_size) { 558 ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); 559 return ICE_ERR_INVAL_SIZE; 560 } 561 562 /* Subtract one to get PBA word count (PBA Size word is included in 563 * total size) 564 */ 565 pba_size--; 566 if (pba_num_size < (((u32)pba_size * 2) + 1)) { 567 ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); 568 return ICE_ERR_PARAM; 569 } 570 571 for (i = 0; i < pba_size; i++) { 572 status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word); 573 if (status != ICE_SUCCESS) { 574 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i); 575 return status; 576 } 577 578 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; 579 pba_num[(i * 2) + 1] = pba_word & 0xFF; 580 } 581 pba_num[(pba_size * 2)] = '\0'; 582 583 return status; 584 } 585 586 /** 587 * ice_get_orom_ver_info - Read Option ROM version information 588 * @hw: pointer to the HW struct 589 * 590 * Read the Combo Image version data from the Boot Configuration TLV and fill 591 * in the option ROM version data. 
592 */ 593 static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) 594 { 595 u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len; 596 struct ice_orom_info *orom = &hw->nvm.orom; 597 enum ice_status status; 598 u32 combo_ver; 599 600 status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, 601 ICE_SR_BOOT_CFG_PTR); 602 if (status) { 603 ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n"); 604 return status; 605 } 606 607 /* Boot Configuration Block must have length at least 2 words 608 * (Combo Image Version High and Combo Image Version Low) 609 */ 610 if (boot_cfg_tlv_len < 2) { 611 ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n"); 612 return ICE_ERR_INVAL_SIZE; 613 } 614 615 status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF), 616 &combo_hi); 617 if (status) { 618 ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n"); 619 return status; 620 } 621 622 status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1), 623 &combo_lo); 624 if (status) { 625 ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n"); 626 return status; 627 } 628 629 combo_ver = ((u32)combo_hi << 16) | combo_lo; 630 631 orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> 632 ICE_OROM_VER_SHIFT); 633 orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); 634 orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> 635 ICE_OROM_VER_BUILD_SHIFT); 636 637 return ICE_SUCCESS; 638 } 639 640 /** 641 * ice_get_netlist_ver_info 642 * @hw: pointer to the HW struct 643 * 644 * Get the netlist version information 645 */ 646 enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw) 647 { 648 struct ice_netlist_ver_info *ver = &hw->netlist_ver; 649 enum ice_status ret; 650 u32 id_blk_start; 651 __le16 raw_data; 652 u16 data, i; 653 u16 *buff; 654 655 ret = ice_acquire_nvm(hw, ICE_RES_READ); 656 if (ret) 657 return ret; 658 buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN, 
659 sizeof(*buff)); 660 if (!buff) { 661 ret = ICE_ERR_NO_MEMORY; 662 goto exit_no_mem; 663 } 664 665 /* read module length */ 666 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID, 667 ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2, 668 ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data, 669 false, false, NULL); 670 if (ret) 671 goto exit_error; 672 673 data = LE16_TO_CPU(raw_data); 674 /* exit if length is = 0 */ 675 if (!data) 676 goto exit_error; 677 678 /* read node count */ 679 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID, 680 ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2, 681 ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data, 682 false, false, NULL); 683 if (ret) 684 goto exit_error; 685 data = LE16_TO_CPU(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M; 686 687 /* netlist ID block starts from offset 4 + node count * 2 */ 688 id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2; 689 690 /* read the entire netlist ID block */ 691 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID, 692 id_blk_start * 2, 693 ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false, 694 false, NULL); 695 if (ret) 696 goto exit_error; 697 698 for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++) 699 buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]); 700 701 ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) | 702 buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW]; 703 ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) | 704 buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW]; 705 ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) | 706 buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW]; 707 ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) | 708 buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW]; 709 ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER]; 710 /* Read the left most 4 bytes of SHA */ 711 ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 | 712 buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14]; 
713 714 exit_error: 715 ice_free(hw, buff); 716 exit_no_mem: 717 ice_release_nvm(hw); 718 return ret; 719 } 720 721 /** 722 * ice_discover_flash_size - Discover the available flash size. 723 * @hw: pointer to the HW struct 724 * 725 * The device flash could be up to 16MB in size. However, it is possible that 726 * the actual size is smaller. Use bisection to determine the accessible size 727 * of flash memory. 728 */ 729 static enum ice_status ice_discover_flash_size(struct ice_hw *hw) 730 { 731 u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; 732 enum ice_status status; 733 734 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 735 736 status = ice_acquire_nvm(hw, ICE_RES_READ); 737 if (status) 738 return status; 739 740 while ((max_size - min_size) > 1) { 741 u32 offset = (max_size + min_size) / 2; 742 u32 len = 1; 743 u8 data; 744 745 status = ice_read_flat_nvm(hw, offset, &len, &data, false); 746 if (status == ICE_ERR_AQ_ERROR && 747 hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { 748 ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", 749 __func__, offset); 750 status = ICE_SUCCESS; 751 max_size = offset; 752 } else if (!status) { 753 ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n", 754 __func__, offset); 755 min_size = offset; 756 } else { 757 /* an unexpected error occurred */ 758 goto err_read_flat_nvm; 759 } 760 } 761 762 ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size); 763 764 hw->nvm.flash_size = max_size; 765 766 err_read_flat_nvm: 767 ice_release_nvm(hw); 768 769 return status; 770 } 771 772 /** 773 * ice_init_nvm - initializes NVM setting 774 * @hw: pointer to the HW struct 775 * 776 * This function reads and populates NVM settings such as Shadow RAM size, 777 * max_timeout, and blank_nvm_mode 778 */ 779 enum ice_status ice_init_nvm(struct ice_hw *hw) 780 { 781 struct ice_nvm_info *nvm = &hw->nvm; 782 u16 eetrack_lo, eetrack_hi, ver; 783 enum ice_status status; 784 u32 fla, gens_stat; 785 u8 
sr_size; 786 787 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 788 789 /* The SR size is stored regardless of the NVM programming mode 790 * as the blank mode may be used in the factory line. 791 */ 792 gens_stat = rd32(hw, GLNVM_GENS); 793 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; 794 795 /* Switching to words (sr_size contains power of 2) */ 796 nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; 797 798 /* Check if we are in the normal or blank NVM programming mode */ 799 fla = rd32(hw, GLNVM_FLA); 800 if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ 801 nvm->blank_nvm_mode = false; 802 } else { 803 /* Blank programming mode */ 804 nvm->blank_nvm_mode = true; 805 ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); 806 return ICE_ERR_NVM_BLANK_MODE; 807 } 808 809 status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver); 810 if (status) { 811 ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n"); 812 return status; 813 } 814 nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; 815 nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; 816 817 status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); 818 if (status) { 819 ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n"); 820 return status; 821 } 822 status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); 823 if (status) { 824 ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n"); 825 return status; 826 } 827 828 nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; 829 830 status = ice_discover_flash_size(hw); 831 if (status) { 832 ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n"); 833 return status; 834 } 835 836 status = ice_get_orom_ver_info(hw); 837 if (status) { 838 ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); 839 return status; 840 } 841 842 /* read the netlist version information */ 843 status = 
ice_get_netlist_ver_info(hw); 844 if (status) 845 ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n"); 846 return ICE_SUCCESS; 847 } 848 849 /** 850 * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary 851 * @hw: pointer to the HW structure 852 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 853 * @words: (in) number of words to read; (out) number of words actually read 854 * @data: words read from the Shadow RAM 855 * 856 * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq 857 * method. The buf read is preceded by the NVM ownership take 858 * and followed by the release. 859 */ 860 enum ice_status 861 ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) 862 { 863 enum ice_status status; 864 865 status = ice_acquire_nvm(hw, ICE_RES_READ); 866 if (!status) { 867 status = ice_read_sr_buf_aq(hw, offset, words, data); 868 ice_release_nvm(hw); 869 } 870 871 return status; 872 } 873 874 /** 875 * __ice_write_sr_word - Writes Shadow RAM word 876 * @hw: pointer to the HW structure 877 * @offset: offset of the Shadow RAM word to write 878 * @data: word to write to the Shadow RAM 879 * 880 * Writes a 16 bit word to the SR using the ice_write_sr_aq method. 881 * NVM ownership have to be acquired and released (on ARQ completion event 882 * reception) by caller. To commit SR to NVM update checksum function 883 * should be called. 
884 */ 885 enum ice_status 886 __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data) 887 { 888 __le16 data_local = CPU_TO_LE16(*data); 889 890 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 891 892 /* Value 0x00 below means that we treat SR as a flat mem */ 893 return ice_write_sr_aq(hw, offset, 1, &data_local, false); 894 } 895 896 /** 897 * __ice_write_sr_buf - Writes Shadow RAM buf 898 * @hw: pointer to the HW structure 899 * @offset: offset of the Shadow RAM buffer to write 900 * @words: number of words to write 901 * @data: words to write to the Shadow RAM 902 * 903 * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 904 * NVM ownership must be acquired before calling this function and released 905 * on ARQ completion event reception by caller. To commit SR to NVM update 906 * checksum function should be called. 907 */ 908 enum ice_status 909 __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data) 910 { 911 enum ice_status status; 912 __le16 *data_local; 913 void *vmem; 914 u32 i; 915 916 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 917 918 vmem = ice_calloc(hw, words, sizeof(u16)); 919 if (!vmem) 920 return ICE_ERR_NO_MEMORY; 921 data_local = (_FORCE_ __le16 *)vmem; 922 923 for (i = 0; i < words; i++) 924 data_local[i] = CPU_TO_LE16(data[i]); 925 926 /* Here we will only write one buffer as the size of the modules 927 * mirrored in the Shadow RAM is always less than 4K. 928 */ 929 status = ice_write_sr_aq(hw, offset, words, data_local, false); 930 931 ice_free(hw, vmem); 932 933 return status; 934 } 935 936 /** 937 * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum 938 * @hw: pointer to hardware structure 939 * @checksum: pointer to the checksum 940 * 941 * This function calculates SW Checksum that covers the whole 64kB shadow RAM 942 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD 943 * is customer specific and unknown. 
Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 */
static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
	enum ice_status status = ICE_SUCCESS;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;	/* u16 accumulator; wraps modulo 2^16 by design */
	u16 vpd_module;
	void *vmem;
	u16 *data;
	u16 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* One sector's worth of staging buffer; the SR is read a sector at
	 * a time below.
	 */
	vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data = (u16 *)vmem;

	/* read pointer to VPD area */
	status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* read pointer to PCIe Alt Auto-load module */
	status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR,
				     &pcie_alt_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_words; i++) {
		/* Read SR page (refill the buffer at each sector boundary) */
		if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;

			status = ice_read_sr_buf_aq(hw, i, &words, data);
			if (status != ICE_SUCCESS)
				goto ice_calc_sr_checksum_exit;
		}

		/* Skip Checksum word */
		if (i == ICE_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count)
		 * NOTE(review): vpd_module is a word offset read from the SR;
		 * the skip window is [vpd_module, vpd_module + max VPD words)
		 */
		if (i >= (u32)vpd_module &&
		    i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS))
			continue;
		/* Skip PCIe ALT module (convert byte size to word count) */
		if (i >= (u32)pcie_alt_module &&
		    i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS))
			continue;

		checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Checksum is chosen so that the covered words plus the checksum
	 * word sum to ICE_SR_SW_CHECKSUM_BASE (mod 2^16).
	 */
	*checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local;

ice_calc_sr_checksum_exit:
	ice_free(hw, vmem);
	return status;
}

/**
 *
ice_update_sr_checksum - Updates the Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 */
enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
{
	enum ice_status status;
	__le16 sum_le;
	u16 sum;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_calc_sr_checksum(hw, &sum);
	if (status)
		return status;

	sum_le = CPU_TO_LE16(sum);
	/* last_command=true: this write is the final one in the series */
	return ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1, &sum_le, true);
}

/**
 * ice_validate_sr_checksum - Validate Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the Shadow RAM SW checksum.
 * If the caller does not need checksum, the value can be NULL.
1041 */ 1042 enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum) 1043 { 1044 enum ice_status status; 1045 u16 checksum_local; 1046 u16 checksum_sr; 1047 1048 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1049 1050 status = ice_acquire_nvm(hw, ICE_RES_READ); 1051 if (!status) { 1052 status = ice_calc_sr_checksum(hw, &checksum_local); 1053 ice_release_nvm(hw); 1054 if (status) 1055 return status; 1056 } else { 1057 return status; 1058 } 1059 1060 ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr); 1061 1062 /* Verify read checksum from EEPROM is the same as 1063 * calculated checksum 1064 */ 1065 if (checksum_local != checksum_sr) 1066 status = ICE_ERR_NVM_CHECKSUM; 1067 1068 /* If the user cares, return the calculated checksum */ 1069 if (checksum) 1070 *checksum = checksum_local; 1071 1072 return status; 1073 } 1074 1075 /** 1076 * ice_nvm_validate_checksum 1077 * @hw: pointer to the HW struct 1078 * 1079 * Verify NVM PFA checksum validity (0x0706) 1080 */ 1081 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) 1082 { 1083 struct ice_aqc_nvm_checksum *cmd; 1084 struct ice_aq_desc desc; 1085 enum ice_status status; 1086 1087 status = ice_acquire_nvm(hw, ICE_RES_READ); 1088 if (status) 1089 return status; 1090 1091 cmd = &desc.params.nvm_checksum; 1092 1093 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); 1094 cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; 1095 1096 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1097 ice_release_nvm(hw); 1098 1099 if (!status) 1100 if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) 1101 status = ICE_ERR_NVM_CHECKSUM; 1102 1103 return status; 1104 } 1105 1106 /** 1107 * ice_nvm_access_get_features - Return the NVM access features structure 1108 * @cmd: NVM access command to process 1109 * @data: storage for the driver NVM features 1110 * 1111 * Fill in the data section of the NVM access request with a copy of the NVM 1112 * features structure. 
1113 */ 1114 enum ice_status 1115 ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, 1116 union ice_nvm_access_data *data) 1117 { 1118 /* The provided data_size must be at least as large as our NVM 1119 * features structure. A larger size should not be treated as an 1120 * error, to allow future extensions to the features structure to 1121 * work on older drivers. 1122 */ 1123 if (cmd->data_size < sizeof(struct ice_nvm_features)) 1124 return ICE_ERR_NO_MEMORY; 1125 1126 /* Initialize the data buffer to zeros */ 1127 ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM); 1128 1129 /* Fill in the features data */ 1130 data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER; 1131 data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER; 1132 data->drv_features.size = sizeof(struct ice_nvm_features); 1133 data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS; 1134 1135 return ICE_SUCCESS; 1136 } 1137 1138 /** 1139 * ice_nvm_access_get_module - Helper function to read module value 1140 * @cmd: NVM access command structure 1141 * 1142 * Reads the module value out of the NVM access config field. 1143 */ 1144 u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd) 1145 { 1146 return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S); 1147 } 1148 1149 /** 1150 * ice_nvm_access_get_flags - Helper function to read flags value 1151 * @cmd: NVM access command structure 1152 * 1153 * Reads the flags value out of the NVM access config field. 1154 */ 1155 u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd) 1156 { 1157 return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S); 1158 } 1159 1160 /** 1161 * ice_nvm_access_get_adapter - Helper function to read adapter info 1162 * @cmd: NVM access command structure 1163 * 1164 * Read the adapter info value out of the NVM access config field. 
1165 */ 1166 u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd) 1167 { 1168 return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >> 1169 ICE_NVM_CFG_ADAPTER_INFO_S); 1170 } 1171 1172 /** 1173 * ice_validate_nvm_rw_reg - Check than an NVM access request is valid 1174 * @cmd: NVM access command structure 1175 * 1176 * Validates that an NVM access structure is request to read or write a valid 1177 * register offset. First validates that the module and flags are correct, and 1178 * then ensures that the register offset is one of the accepted registers. 1179 */ 1180 static enum ice_status 1181 ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) 1182 { 1183 u32 module, flags, offset; 1184 u16 i; 1185 1186 module = ice_nvm_access_get_module(cmd); 1187 flags = ice_nvm_access_get_flags(cmd); 1188 offset = cmd->offset; 1189 1190 /* Make sure the module and flags indicate a read/write request */ 1191 if (module != ICE_NVM_REG_RW_MODULE || 1192 flags != ICE_NVM_REG_RW_FLAGS || 1193 cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval)) 1194 return ICE_ERR_PARAM; 1195 1196 switch (offset) { 1197 case GL_HICR: 1198 case GL_HICR_EN: /* Note, this register is read only */ 1199 case GL_FWSTS: 1200 case GL_MNG_FWSM: 1201 case GLGEN_CSR_DEBUG_C: 1202 case GLGEN_RSTAT: 1203 case GLPCI_LBARCTRL: 1204 case GLNVM_GENS: 1205 case GLNVM_FLA: 1206 case PF_FUNC_RID: 1207 return ICE_SUCCESS; 1208 default: 1209 break; 1210 } 1211 1212 for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++) 1213 if (offset == (u32)GL_HIDA(i)) 1214 return ICE_SUCCESS; 1215 1216 for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++) 1217 if (offset == (u32)GL_HIBA(i)) 1218 return ICE_SUCCESS; 1219 1220 /* All other register offsets are not valid */ 1221 return ICE_ERR_OUT_OF_RANGE; 1222 } 1223 1224 /** 1225 * ice_nvm_access_read - Handle an NVM read request 1226 * @hw: pointer to the HW struct 1227 * @cmd: NVM access command to process 1228 * @data: storage for the register value read 1229 * 1230 * 
Process an NVM access request to read a register.
 */
enum ice_status
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
		    union ice_nvm_access_data *data)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Zero the output buffer up front so the caller never sees stale
	 * contents, even when validation fails below.
	 */
	ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);

	/* Only valid register read/write requests may proceed */
	status = ice_validate_nvm_rw_reg(cmd);
	if (status)
		return status;

	ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n",
		  cmd->offset);

	/* Fetch the register contents into the reply data field */
	data->regval = rd32(hw, cmd->offset);

	return ICE_SUCCESS;
}

/**
 * ice_nvm_access_write - Handle an NVM write request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command to process
 * @data: NVM access data to write
 *
 * Process an NVM access request to write a register.
1264 */ 1265 enum ice_status 1266 ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 1267 union ice_nvm_access_data *data) 1268 { 1269 enum ice_status status; 1270 1271 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1272 1273 /* Make sure this is a valid read/write access request */ 1274 status = ice_validate_nvm_rw_reg(cmd); 1275 if (status) 1276 return status; 1277 1278 /* Reject requests to write to read-only registers */ 1279 switch (cmd->offset) { 1280 case GL_HICR_EN: 1281 case GLGEN_RSTAT: 1282 return ICE_ERR_OUT_OF_RANGE; 1283 default: 1284 break; 1285 } 1286 1287 ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n", 1288 cmd->offset, data->regval); 1289 1290 /* Write the data field to the specified register */ 1291 wr32(hw, cmd->offset, data->regval); 1292 1293 return ICE_SUCCESS; 1294 } 1295 1296 /** 1297 * ice_handle_nvm_access - Handle an NVM access request 1298 * @hw: pointer to the HW struct 1299 * @cmd: NVM access command info 1300 * @data: pointer to read or return data 1301 * 1302 * Process an NVM access request. Read the command structure information and 1303 * determine if it is valid. If not, report an error indicating the command 1304 * was invalid. 1305 * 1306 * For valid commands, perform the necessary function, copying the data into 1307 * the provided data buffer. 
1308 */ 1309 enum ice_status 1310 ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 1311 union ice_nvm_access_data *data) 1312 { 1313 u32 module, flags, adapter_info; 1314 1315 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1316 1317 /* Extended flags are currently reserved and must be zero */ 1318 if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0) 1319 return ICE_ERR_PARAM; 1320 1321 /* Adapter info must match the HW device ID */ 1322 adapter_info = ice_nvm_access_get_adapter(cmd); 1323 if (adapter_info != hw->device_id) 1324 return ICE_ERR_PARAM; 1325 1326 switch (cmd->command) { 1327 case ICE_NVM_CMD_READ: 1328 module = ice_nvm_access_get_module(cmd); 1329 flags = ice_nvm_access_get_flags(cmd); 1330 1331 /* Getting the driver's NVM features structure shares the same 1332 * command type as reading a register. Read the config field 1333 * to determine if this is a request to get features. 1334 */ 1335 if (module == ICE_NVM_GET_FEATURES_MODULE && 1336 flags == ICE_NVM_GET_FEATURES_FLAGS && 1337 cmd->offset == 0) 1338 return ice_nvm_access_get_features(cmd, data); 1339 else 1340 return ice_nvm_access_read(hw, cmd, data); 1341 case ICE_NVM_CMD_WRITE: 1342 return ice_nvm_access_write(hw, cmd, data); 1343 default: 1344 return ICE_ERR_PARAM; 1345 } 1346 } 1347 1348