1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2020, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
 */
/*$FreeBSD$*/

#include "ice_common.h"

/**
 * ice_aq_read_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands (0x0701)
 */
enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
		void *data, bool last_command, bool read_shadow_ram,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	/* The descriptor offset field is split into offset_low (16 bits) and
	 * offset_high (8 bits) below; reject offsets it cannot represent.
	 */
	if (offset > ICE_AQC_NVM_MAX_OFFSET)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

	/* FLASH_ONLY applies to flat (non Shadow RAM) reads of the default
	 * module only.
	 */
	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = CPU_TO_LE16(length);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors and ensures that no single
 * read request exceeds the maximum 4Kb read for a single AdminQ command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Report zero bytes read until we know better */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: requested data is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4Kb at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4Kb.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		last_cmd = !(bytes_read + read_size < inlen);

		/* ice_aq_read_nvm takes the length as a u16. Our read_size is
		 * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum
		 * size guarantees that it will fit within the 2 bytes.
		 */
		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, (u16)read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	*length = bytes_read;
	return status;
}

/**
 * ice_aq_update_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be written (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @command_flags: command parameters
 * @cd: pointer to command details structure or NULL
 *
 * Update the NVM using the admin queue commands (0x0703)
 */
static enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
		  u16 length, void *data, bool last_command, u8 command_flags,
		  struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	/* In offset the highest byte must be zeroed. */
	if (offset & 0xFF000000)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);

	cmd->cmd_flags |= command_flags;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = CPU_TO_LE16(length);

	/* Mark the buffer as a read buffer for firmware (data flows HW-ward) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_aq_erase_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @cd: pointer to command details structure or NULL
 *
 * Erase the NVM sector using the admin queue commands (0x0702)
 */
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);

	cmd->module_typeid = CPU_TO_LE16(module_typeid);
	cmd->length = CPU_TO_LE16(ICE_AQC_NVM_ERASE_LEN);
	cmd->offset_low = 0;
	cmd->offset_high = 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_read_nvm_cfg - read an NVM config block
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM access admin command bits
 * @field_id: field or feature ID
 * @data: buffer for result
 * @buf_size: buffer size
 * @elem_count: pointer to count of elements read by FW
 * @cd: pointer to command details structure or NULL
 *
 * Reads single or multiple feature/field ID and data (0x0704)
 */
enum ice_status
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
		    u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read);

	cmd->cmd_flags = cmd_flags;
	cmd->id = CPU_TO_LE16(field_id);

	status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
	/* elem_count is optional; only fill it in on success */
	if (!status && elem_count)
		*elem_count = LE16_TO_CPU(cmd->count);

	return status;
}

/**
 * ice_aq_write_nvm_cfg - write an NVM config block
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM access admin command bits
 * @data: buffer for result
 * @buf_size: buffer size
 * @elem_count: count of elements to be written
 * @cd: pointer to command details structure or NULL
 *
 * Writes single or multiple feature/field ID and data (0x0705)
 */
enum ice_status
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
		     u16 elem_count, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_cfg *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.nvm_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->count = CPU_TO_LE16(elem_count);
	cmd->cmd_flags = cmd_flags;

	return ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
}

/**
 * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
 * @hw: pointer to the HW structure
 * @offset: offset in words from module start
 * @words: number of words to access
 */
static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
	/* The access must end within the Shadow RAM */
	if ((offset + words) > hw->nvm.sr_words) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: offset beyond SR lmt.\n");
		return ICE_ERR_PARAM;
	}

	if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
		/* We can access only up to 4KB (one sector), in one AQ write */
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: tried to access %d words, limit is %d.\n",
			  words, ICE_SR_SECTOR_SIZE_IN_WORDS);
		return ICE_ERR_PARAM;
	}

	/* First and last word of the access must fall in the same sector */
	if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
	    (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
		/* A single access cannot spread over two sectors */
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: cannot spread over two sectors.\n");
		return ICE_ERR_PARAM;
	}

	return ICE_SUCCESS;
}

/**
 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
 */
enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
	u32 bytes = sizeof(u16);
	enum ice_status status;
	__le16 data_local;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Note that ice_read_flat_nvm checks if the read is past the Shadow
	 * RAM size, and ensures we don't read across a Shadow RAM sector
	 * boundary
	 */
	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
				   (u8 *)&data_local, true);
	if (status)
		return status;

	/* Shadow RAM words are stored little-endian */
	*data = LE16_TO_CPU(data_local);
	return ICE_SUCCESS;
}

/**
 * ice_write_sr_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 */
static enum ice_status
ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
		bool last_command)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_check_sr_access_params(hw, offset, words);
	if (!status)
		/* Shadow RAM offset/size are in words, the AQ command takes
		 * bytes, hence the multiplication by 2.
		 */
		status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data,
					   last_command, 0, NULL);

	return status;
}

/**
 * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buf) from the Shadow RAM. NVM ownership is taken
 * by the caller before this read and released afterwards (this function does
 * not acquire it itself; see ice_read_sr_buf).
 */
static enum ice_status
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
	u32 bytes = *words * 2, i;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* ice_read_flat_nvm takes into account the 4Kb AdminQ and Shadow RAM
	 * sector restrictions necessary when reading from the NVM.
	 */
	status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);

	/* Report the number of words successfully read */
	*words = bytes / 2;

	/* Byte swap the words up to the amount we actually read */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);

	return status;
}

/**
 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership.
 */
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* In blank programming mode there is no ownership to take */
	if (hw->nvm.blank_nvm_mode)
		return ICE_SUCCESS;

	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}

/**
 * ice_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM ownership.
 */
void ice_release_nvm(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Nothing was acquired in blank programming mode */
	if (hw->nvm.blank_nvm_mode)
		return;

	ice_release_res(hw, ICE_NVM_RES_ID);
}

/**
 * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
 */
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_read_sr_word_aq(hw, offset, data);
		ice_release_nvm(hw);
	}

	return status;
}

/**
 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 */
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
		       u16 module_type)
{
	enum ice_status status;
	u16 pfa_len, pfa_ptr;
	u16 next_tlv;

	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
		return status;
	}
	/* The first word of the PFA holds its length (in words) */
	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
		return status;
	}
	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
	while (next_tlv < pfa_ptr + pfa_len) {
		u16 tlv_sub_module_type;
		u16 tlv_len;

		/* Read TLV type */
		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
			break;
		}
		/* Read TLV length */
		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
			break;
		}
		if (tlv_sub_module_type == module_type) {
			if (tlv_len) {
				/* Return a pointer to the TLV header; the
				 * value follows the type and length words.
				 */
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return ICE_SUCCESS;
			}
			return ICE_ERR_INVAL_SIZE;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length)
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}
	/* Module does not exist */
	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_read_pba_string - Reads part number string from NVM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the NVM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the NVM.
 */
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
	u16 pba_tlv, pba_tlv_len;
	enum ice_status status;
	u16 pba_word, pba_size;
	u16 i;

	status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
					ICE_SR_PBA_BLOCK_PTR);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
		return status;
	}

	/* pba_size is the next word */
	status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
		return status;
	}

	if (pba_tlv_len < pba_size) {
		ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	/* Need 2 chars per PBA word plus room for the NUL terminator */
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Buffer too small for PBA data.\n");
		return ICE_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		/* Each word packs two characters, high byte first */
		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * ice_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 *
 * Read the Combo Image version data from the Boot Configuration TLV and fill
 * in the option ROM version data.
 */
static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
{
	u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
	struct ice_orom_info *orom = &hw->nvm.orom;
	enum ice_status status;
	u32 combo_ver;

	status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
					ICE_SR_BOOT_CFG_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read Boot Configuration Block TLV.\n");
		return status;
	}

	/* Boot Configuration Block must have length at least 2 words
	 * (Combo Image Version High and Combo Image Version Low)
	 */
	if (boot_cfg_tlv_len < 2) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Invalid Boot Configuration Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
				  &combo_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
		return status;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
				  &combo_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
		return status;
	}

	/* Cast before shifting to avoid promoting u16 to signed int */
	combo_ver = ((u32)combo_hi << 16) | combo_lo;

	/* Unpack major/build/patch from the combined version word */
	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
			   ICE_OROM_VER_SHIFT);
	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
			    ICE_OROM_VER_BUILD_SHIFT);

	return ICE_SUCCESS;
}

/**
 * ice_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
656 */ 657 static enum ice_status ice_discover_flash_size(struct ice_hw *hw) 658 { 659 u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; 660 enum ice_status status; 661 662 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 663 664 status = ice_acquire_nvm(hw, ICE_RES_READ); 665 if (status) 666 return status; 667 668 while ((max_size - min_size) > 1) { 669 u32 offset = (max_size + min_size) / 2; 670 u32 len = 1; 671 u8 data; 672 673 status = ice_read_flat_nvm(hw, offset, &len, &data, false); 674 if (status == ICE_ERR_AQ_ERROR && 675 hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { 676 ice_debug(hw, ICE_DBG_NVM, 677 "%s: New upper bound of %u bytes\n", 678 __func__, offset); 679 status = ICE_SUCCESS; 680 max_size = offset; 681 } else if (!status) { 682 ice_debug(hw, ICE_DBG_NVM, 683 "%s: New lower bound of %u bytes\n", 684 __func__, offset); 685 min_size = offset; 686 } else { 687 /* an unexpected error occurred */ 688 goto err_read_flat_nvm; 689 } 690 } 691 692 ice_debug(hw, ICE_DBG_NVM, 693 "Predicted flash size is %u bytes\n", max_size); 694 695 hw->nvm.flash_size = max_size; 696 697 err_read_flat_nvm: 698 ice_release_nvm(hw); 699 700 return status; 701 } 702 703 /** 704 * ice_init_nvm - initializes NVM setting 705 * @hw: pointer to the HW struct 706 * 707 * This function reads and populates NVM settings such as Shadow RAM size, 708 * max_timeout, and blank_nvm_mode 709 */ 710 enum ice_status ice_init_nvm(struct ice_hw *hw) 711 { 712 struct ice_nvm_info *nvm = &hw->nvm; 713 u16 eetrack_lo, eetrack_hi, ver; 714 enum ice_status status; 715 u32 fla, gens_stat; 716 u8 sr_size; 717 718 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 719 720 /* The SR size is stored regardless of the NVM programming mode 721 * as the blank mode may be used in the factory line. 
722 */ 723 gens_stat = rd32(hw, GLNVM_GENS); 724 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; 725 726 /* Switching to words (sr_size contains power of 2) */ 727 nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; 728 729 /* Check if we are in the normal or blank NVM programming mode */ 730 fla = rd32(hw, GLNVM_FLA); 731 if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ 732 nvm->blank_nvm_mode = false; 733 } else { 734 /* Blank programming mode */ 735 nvm->blank_nvm_mode = true; 736 ice_debug(hw, ICE_DBG_NVM, 737 "NVM init error: unsupported blank mode.\n"); 738 return ICE_ERR_NVM_BLANK_MODE; 739 } 740 741 status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver); 742 if (status) { 743 ice_debug(hw, ICE_DBG_INIT, 744 "Failed to read DEV starter version.\n"); 745 return status; 746 } 747 nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; 748 nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; 749 750 status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); 751 if (status) { 752 ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n"); 753 return status; 754 } 755 status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); 756 if (status) { 757 ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n"); 758 return status; 759 } 760 761 nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; 762 763 status = ice_discover_flash_size(hw); 764 if (status) { 765 ice_debug(hw, ICE_DBG_NVM, 766 "NVM init error: failed to discover flash size.\n"); 767 return status; 768 } 769 770 switch (hw->device_id) { 771 /* the following devices do not have boot_cfg_tlv yet */ 772 case ICE_DEV_ID_E822C_BACKPLANE: 773 case ICE_DEV_ID_E822C_QSFP: 774 case ICE_DEV_ID_E822C_10G_BASE_T: 775 case ICE_DEV_ID_E822C_SGMII: 776 case ICE_DEV_ID_E822C_SFP: 777 case ICE_DEV_ID_E822L_BACKPLANE: 778 case ICE_DEV_ID_E822L_SFP: 779 case ICE_DEV_ID_E822L_10G_BASE_T: 780 case ICE_DEV_ID_E822L_SGMII: 781 case 
ICE_DEV_ID_E823L_BACKPLANE: 782 case ICE_DEV_ID_E823L_SFP: 783 case ICE_DEV_ID_E823L_10G_BASE_T: 784 case ICE_DEV_ID_E823L_1GBE: 785 case ICE_DEV_ID_E823L_QSFP: 786 return status; 787 default: 788 break; 789 } 790 791 status = ice_get_orom_ver_info(hw); 792 if (status) { 793 ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); 794 return status; 795 } 796 797 /* read the netlist version information */ 798 status = ice_get_netlist_ver_info(hw); 799 if (status) 800 ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n"); 801 return ICE_SUCCESS; 802 } 803 804 /** 805 * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary 806 * @hw: pointer to the HW structure 807 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 808 * @words: (in) number of words to read; (out) number of words actually read 809 * @data: words read from the Shadow RAM 810 * 811 * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq 812 * method. The buf read is preceded by the NVM ownership take 813 * and followed by the release. 814 */ 815 enum ice_status 816 ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) 817 { 818 enum ice_status status; 819 820 status = ice_acquire_nvm(hw, ICE_RES_READ); 821 if (!status) { 822 status = ice_read_sr_buf_aq(hw, offset, words, data); 823 ice_release_nvm(hw); 824 } 825 826 return status; 827 } 828 829 /** 830 * __ice_write_sr_word - Writes Shadow RAM word 831 * @hw: pointer to the HW structure 832 * @offset: offset of the Shadow RAM word to write 833 * @data: word to write to the Shadow RAM 834 * 835 * Writes a 16 bit word to the SR using the ice_write_sr_aq method. 836 * NVM ownership have to be acquired and released (on ARQ completion event 837 * reception) by caller. To commit SR to NVM update checksum function 838 * should be called. 
 */
enum ice_status
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
{
	__le16 data_local = CPU_TO_LE16(*data);

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Value 0x00 below means that we treat SR as a flat mem */
	return ice_write_sr_aq(hw, offset, 1, &data_local, false);
}

/**
 * __ice_write_sr_buf - Writes Shadow RAM buf
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller. To commit SR to NVM update
 * checksum function should be called.
 */
enum ice_status
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
{
	enum ice_status status;
	__le16 *data_local;
	void *vmem;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Stage the data in a scratch buffer converted to little-endian */
	vmem = ice_calloc(hw, words, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data_local = (_FORCE_ __le16 *)vmem;

	for (i = 0; i < words; i++)
		data_local[i] = CPU_TO_LE16(data[i]);

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	status = ice_write_sr_aq(hw, offset, words, data_local, false);

	ice_free(hw, vmem);

	return status;
}

/**
 * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown.
 * Therefore, this function skips all maximum possible size of VPD (1kB).
 */
static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
	enum ice_status status = ICE_SUCCESS;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module;
	void *vmem;
	u16 *data;
	u16 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Scratch buffer holding one Shadow RAM sector at a time */
	vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
	if (!vmem)
		return ICE_ERR_NO_MEMORY;
	data = (u16 *)vmem;

	/* read pointer to VPD area */
	status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* read pointer to PCIe Alt Auto-load module */
	status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR,
				     &pcie_alt_module);
	if (status)
		goto ice_calc_sr_checksum_exit;

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_words; i++) {
		/* Read the next SR sector when crossing a sector boundary */
		if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;

			status = ice_read_sr_buf_aq(hw, i, &words, data);
			if (status != ICE_SUCCESS)
				goto ice_calc_sr_checksum_exit;
		}

		/* Skip Checksum word */
		if (i == ICE_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS)))
			continue;
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS)))
			continue;

		checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* The stored checksum is the value that makes the covered words sum
	 * to ICE_SR_SW_CHECKSUM_BASE.
	 */
	*checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local;

ice_calc_sr_checksum_exit:
	ice_free(hw, vmem);
	return status;
}

/**
 *
 * ice_update_sr_checksum - Updates the Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 */
enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
{
	enum ice_status status;
	__le16 le_sum;
	u16 checksum;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_calc_sr_checksum(hw, &checksum);
	if (!status) {
		le_sum = CPU_TO_LE16(checksum);
		/* last_command=true tells the AdminQ to commit the update */
		status = ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1,
					 &le_sum, true);
	}
	return status;
}

/**
 * ice_validate_sr_checksum - Validate Shadow RAM SW checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the Shadow RAM SW checksum.
 * If the caller does not need checksum, the value can be NULL.
996 */ 997 enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum) 998 { 999 enum ice_status status; 1000 u16 checksum_local; 1001 u16 checksum_sr; 1002 1003 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1004 1005 status = ice_acquire_nvm(hw, ICE_RES_READ); 1006 if (!status) { 1007 status = ice_calc_sr_checksum(hw, &checksum_local); 1008 ice_release_nvm(hw); 1009 if (status) 1010 return status; 1011 } else { 1012 return status; 1013 } 1014 1015 ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr); 1016 1017 /* Verify read checksum from EEPROM is the same as 1018 * calculated checksum 1019 */ 1020 if (checksum_local != checksum_sr) 1021 status = ICE_ERR_NVM_CHECKSUM; 1022 1023 /* If the user cares, return the calculated checksum */ 1024 if (checksum) 1025 *checksum = checksum_local; 1026 1027 return status; 1028 } 1029 1030 /** 1031 * ice_nvm_validate_checksum 1032 * @hw: pointer to the HW struct 1033 * 1034 * Verify NVM PFA checksum validity (0x0706) 1035 */ 1036 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) 1037 { 1038 struct ice_aqc_nvm_checksum *cmd; 1039 struct ice_aq_desc desc; 1040 enum ice_status status; 1041 1042 status = ice_acquire_nvm(hw, ICE_RES_READ); 1043 if (status) 1044 return status; 1045 1046 cmd = &desc.params.nvm_checksum; 1047 1048 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); 1049 cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; 1050 1051 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1052 ice_release_nvm(hw); 1053 1054 if (!status) 1055 if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) 1056 status = ICE_ERR_NVM_CHECKSUM; 1057 1058 return status; 1059 } 1060 1061 /** 1062 * ice_nvm_access_get_features - Return the NVM access features structure 1063 * @cmd: NVM access command to process 1064 * @data: storage for the driver NVM features 1065 * 1066 * Fill in the data section of the NVM access request with a copy of the NVM 1067 * features structure. 
1068 */ 1069 enum ice_status 1070 ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, 1071 union ice_nvm_access_data *data) 1072 { 1073 /* The provided data_size must be at least as large as our NVM 1074 * features structure. A larger size should not be treated as an 1075 * error, to allow future extensions to to the features structure to 1076 * work on older drivers. 1077 */ 1078 if (cmd->data_size < sizeof(struct ice_nvm_features)) 1079 return ICE_ERR_NO_MEMORY; 1080 1081 /* Initialize the data buffer to zeros */ 1082 ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM); 1083 1084 /* Fill in the features data */ 1085 data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER; 1086 data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER; 1087 data->drv_features.size = sizeof(struct ice_nvm_features); 1088 data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS; 1089 1090 return ICE_SUCCESS; 1091 } 1092 1093 /** 1094 * ice_nvm_access_get_module - Helper function to read module value 1095 * @cmd: NVM access command structure 1096 * 1097 * Reads the module value out of the NVM access config field. 1098 */ 1099 u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd) 1100 { 1101 return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S); 1102 } 1103 1104 /** 1105 * ice_nvm_access_get_flags - Helper function to read flags value 1106 * @cmd: NVM access command structure 1107 * 1108 * Reads the flags value out of the NVM access config field. 1109 */ 1110 u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd) 1111 { 1112 return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S); 1113 } 1114 1115 /** 1116 * ice_nvm_access_get_adapter - Helper function to read adapter info 1117 * @cmd: NVM access command structure 1118 * 1119 * Read the adapter info value out of the NVM access config field. 
1120 */ 1121 u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd) 1122 { 1123 return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >> 1124 ICE_NVM_CFG_ADAPTER_INFO_S); 1125 } 1126 1127 /** 1128 * ice_validate_nvm_rw_reg - Check than an NVM access request is valid 1129 * @cmd: NVM access command structure 1130 * 1131 * Validates that an NVM access structure is request to read or write a valid 1132 * register offset. First validates that the module and flags are correct, and 1133 * then ensures that the register offset is one of the accepted registers. 1134 */ 1135 static enum ice_status 1136 ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) 1137 { 1138 u32 module, flags, offset; 1139 u16 i; 1140 1141 module = ice_nvm_access_get_module(cmd); 1142 flags = ice_nvm_access_get_flags(cmd); 1143 offset = cmd->offset; 1144 1145 /* Make sure the module and flags indicate a read/write request */ 1146 if (module != ICE_NVM_REG_RW_MODULE || 1147 flags != ICE_NVM_REG_RW_FLAGS || 1148 cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval)) 1149 return ICE_ERR_PARAM; 1150 1151 switch (offset) { 1152 case GL_HICR: 1153 case GL_HICR_EN: /* Note, this register is read only */ 1154 case GL_FWSTS: 1155 case GL_MNG_FWSM: 1156 case GLGEN_CSR_DEBUG_C: 1157 case GLGEN_RSTAT: 1158 case GLPCI_LBARCTRL: 1159 case GLNVM_GENS: 1160 case GLNVM_FLA: 1161 case PF_FUNC_RID: 1162 return ICE_SUCCESS; 1163 default: 1164 break; 1165 } 1166 1167 for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++) 1168 if (offset == (u32)GL_HIDA(i)) 1169 return ICE_SUCCESS; 1170 1171 for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++) 1172 if (offset == (u32)GL_HIBA(i)) 1173 return ICE_SUCCESS; 1174 1175 /* All other register offsets are not valid */ 1176 return ICE_ERR_OUT_OF_RANGE; 1177 } 1178 1179 /** 1180 * ice_nvm_access_read - Handle an NVM read request 1181 * @hw: pointer to the HW struct 1182 * @cmd: NVM access command to process 1183 * @data: storage for the register value read 1184 * 1185 * 
Process an NVM access request to read a register.
 */
enum ice_status
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
		    union ice_nvm_access_data *data)
{
	enum ice_status err;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Always initialize the output data, even on failure */
	ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);

	/* Make sure this is a valid read/write access request */
	err = ice_validate_nvm_rw_reg(cmd);
	if (err)
		return err;

	ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n",
		  cmd->offset);

	/* Read the register and store the contents in the data field */
	data->regval = rd32(hw, cmd->offset);

	return ICE_SUCCESS;
}

/**
 * ice_nvm_access_write - Handle an NVM write request
 * @hw: pointer to the HW struct
 * @cmd: NVM access command to process
 * @data: NVM access data to write
 *
 * Process an NVM access request to write a register.
1219 */ 1220 enum ice_status 1221 ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 1222 union ice_nvm_access_data *data) 1223 { 1224 enum ice_status status; 1225 1226 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1227 1228 /* Make sure this is a valid read/write access request */ 1229 status = ice_validate_nvm_rw_reg(cmd); 1230 if (status) 1231 return status; 1232 1233 /* Reject requests to write to read-only registers */ 1234 switch (cmd->offset) { 1235 case GL_HICR_EN: 1236 case GLGEN_RSTAT: 1237 return ICE_ERR_OUT_OF_RANGE; 1238 default: 1239 break; 1240 } 1241 1242 ice_debug(hw, ICE_DBG_NVM, 1243 "NVM access: writing register %08x with value %08x\n", 1244 cmd->offset, data->regval); 1245 1246 /* Write the data field to the specified register */ 1247 wr32(hw, cmd->offset, data->regval); 1248 1249 return ICE_SUCCESS; 1250 } 1251 1252 /** 1253 * ice_handle_nvm_access - Handle an NVM access request 1254 * @hw: pointer to the HW struct 1255 * @cmd: NVM access command info 1256 * @data: pointer to read or return data 1257 * 1258 * Process an NVM access request. Read the command structure information and 1259 * determine if it is valid. If not, report an error indicating the command 1260 * was invalid. 1261 * 1262 * For valid commands, perform the necessary function, copying the data into 1263 * the provided data buffer. 
1264 */ 1265 enum ice_status 1266 ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, 1267 union ice_nvm_access_data *data) 1268 { 1269 u32 module, flags, adapter_info; 1270 1271 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1272 1273 /* Extended flags are currently reserved and must be zero */ 1274 if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0) 1275 return ICE_ERR_PARAM; 1276 1277 /* Adapter info must match the HW device ID */ 1278 adapter_info = ice_nvm_access_get_adapter(cmd); 1279 if (adapter_info != hw->device_id) 1280 return ICE_ERR_PARAM; 1281 1282 switch (cmd->command) { 1283 case ICE_NVM_CMD_READ: 1284 module = ice_nvm_access_get_module(cmd); 1285 flags = ice_nvm_access_get_flags(cmd); 1286 1287 /* Getting the driver's NVM features structure shares the same 1288 * command type as reading a register. Read the config field 1289 * to determine if this is a request to get features. 1290 */ 1291 if (module == ICE_NVM_GET_FEATURES_MODULE && 1292 flags == ICE_NVM_GET_FEATURES_FLAGS && 1293 cmd->offset == 0) 1294 return ice_nvm_access_get_features(cmd, data); 1295 else 1296 return ice_nvm_access_read(hw, cmd, data); 1297 case ICE_NVM_CMD_WRITE: 1298 return ice_nvm_access_write(hw, cmd, data); 1299 default: 1300 return ICE_ERR_PARAM; 1301 } 1302 } 1303 1304