xref: /linux/drivers/net/ethernet/intel/ice/ice_nvm.c (revision 4fd18fc38757217c746aa063ba9e4729814dc737)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 
6 /**
7  * ice_aq_read_nvm
8  * @hw: pointer to the HW struct
9  * @module_typeid: module pointer location in words from the NVM beginning
10  * @offset: byte offset from the module beginning
11  * @length: length of the section to be read (in bytes from the offset)
12  * @data: command buffer (size [bytes] = length)
13  * @last_command: tells if this is the last command in a series
14  * @read_shadow_ram: tell if this is a shadow RAM read
15  * @cd: pointer to command details structure or NULL
16  *
17  * Read the NVM using the admin queue commands (0x0701)
18  */
19 static enum ice_status
20 ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
21 		void *data, bool last_command, bool read_shadow_ram,
22 		struct ice_sq_cd *cd)
23 {
24 	struct ice_aq_desc desc;
25 	struct ice_aqc_nvm *cmd;
26 
27 	cmd = &desc.params.nvm;
28 
29 	if (offset > ICE_AQC_NVM_MAX_OFFSET)
30 		return ICE_ERR_PARAM;
31 
32 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
33 
34 	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
35 		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
36 
37 	/* If this is the last command in a series, set the proper flag. */
38 	if (last_command)
39 		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
40 	cmd->module_typeid = cpu_to_le16(module_typeid);
41 	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
42 	cmd->offset_high = (offset >> 16) & 0xFF;
43 	cmd->length = cpu_to_le16(length);
44 
45 	return ice_aq_send_cmd(hw, &desc, data, length, cd);
46 }
47 
/**
 * ice_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors and ensures that no single
 * read request exceeds the maximum 4KB read for a single AdminQ command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	/* Report zero bytes read unless at least one chunk succeeds below */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4KB.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		/* True when this chunk completes the request; the final AQ
		 * command carries the ICE_AQC_NVM_LAST_CMD flag.
		 */
		last_cmd = !(bytes_read + read_size < inlen);

		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		/* Advance both the destination cursor and the flash offset */
		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Tell the caller how many bytes actually landed in @data */
	*length = bytes_read;
	return status;
}
108 
109 /**
110  * ice_aq_update_nvm
111  * @hw: pointer to the HW struct
112  * @module_typeid: module pointer location in words from the NVM beginning
113  * @offset: byte offset from the module beginning
114  * @length: length of the section to be written (in bytes from the offset)
115  * @data: command buffer (size [bytes] = length)
116  * @last_command: tells if this is the last command in a series
117  * @command_flags: command parameters
118  * @cd: pointer to command details structure or NULL
119  *
120  * Update the NVM using the admin queue commands (0x0703)
121  */
122 enum ice_status
123 ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
124 		  u16 length, void *data, bool last_command, u8 command_flags,
125 		  struct ice_sq_cd *cd)
126 {
127 	struct ice_aq_desc desc;
128 	struct ice_aqc_nvm *cmd;
129 
130 	cmd = &desc.params.nvm;
131 
132 	/* In offset the highest byte must be zeroed. */
133 	if (offset & 0xFF000000)
134 		return ICE_ERR_PARAM;
135 
136 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
137 
138 	cmd->cmd_flags |= command_flags;
139 
140 	/* If this is the last command in a series, set the proper flag. */
141 	if (last_command)
142 		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
143 	cmd->module_typeid = cpu_to_le16(module_typeid);
144 	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
145 	cmd->offset_high = (offset >> 16) & 0xFF;
146 	cmd->length = cpu_to_le16(length);
147 
148 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
149 
150 	return ice_aq_send_cmd(hw, &desc, data, length, cd);
151 }
152 
153 /**
154  * ice_aq_erase_nvm
155  * @hw: pointer to the HW struct
156  * @module_typeid: module pointer location in words from the NVM beginning
157  * @cd: pointer to command details structure or NULL
158  *
159  * Erase the NVM sector using the admin queue commands (0x0702)
160  */
161 enum ice_status
162 ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
163 {
164 	struct ice_aq_desc desc;
165 	struct ice_aqc_nvm *cmd;
166 
167 	cmd = &desc.params.nvm;
168 
169 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
170 
171 	cmd->module_typeid = cpu_to_le16(module_typeid);
172 	cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
173 	cmd->offset_low = 0;
174 	cmd->offset_high = 0;
175 
176 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
177 }
178 
179 /**
180  * ice_read_sr_word_aq - Reads Shadow RAM via AQ
181  * @hw: pointer to the HW structure
182  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
183  * @data: word read from the Shadow RAM
184  *
185  * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
186  */
187 static enum ice_status
188 ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
189 {
190 	u32 bytes = sizeof(u16);
191 	enum ice_status status;
192 	__le16 data_local;
193 
194 	/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
195 	 * Shadow RAM sector restrictions necessary when reading from the NVM.
196 	 */
197 	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
198 				   (__force u8 *)&data_local, true);
199 	if (status)
200 		return status;
201 
202 	*data = le16_to_cpu(data_local);
203 	return 0;
204 }
205 
206 /**
207  * ice_acquire_nvm - Generic request for acquiring the NVM ownership
208  * @hw: pointer to the HW structure
209  * @access: NVM access type (read or write)
210  *
211  * This function will request NVM ownership.
212  */
213 enum ice_status
214 ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
215 {
216 	if (hw->nvm.blank_nvm_mode)
217 		return 0;
218 
219 	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
220 }
221 
222 /**
223  * ice_release_nvm - Generic request for releasing the NVM ownership
224  * @hw: pointer to the HW structure
225  *
226  * This function will release NVM ownership.
227  */
228 void ice_release_nvm(struct ice_hw *hw)
229 {
230 	if (hw->nvm.blank_nvm_mode)
231 		return;
232 
233 	ice_release_res(hw, ICE_NVM_RES_ID);
234 }
235 
236 /**
237  * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
238  * @hw: pointer to the HW structure
239  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
240  * @data: word read from the Shadow RAM
241  *
242  * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
243  */
244 enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
245 {
246 	enum ice_status status;
247 
248 	status = ice_acquire_nvm(hw, ICE_RES_READ);
249 	if (!status) {
250 		status = ice_read_sr_word_aq(hw, offset, data);
251 		ice_release_nvm(hw);
252 	}
253 
254 	return status;
255 }
256 
257 /**
258  * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
259  * @hw: pointer to hardware structure
260  * @module_tlv: pointer to module TLV to return
261  * @module_tlv_len: pointer to module TLV length to return
262  * @module_type: module type requested
263  *
264  * Finds the requested sub module TLV type from the Preserved Field
265  * Area (PFA) and returns the TLV pointer and length. The caller can
266  * use these to read the variable length TLV value.
267  */
268 enum ice_status
269 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
270 		       u16 module_type)
271 {
272 	enum ice_status status;
273 	u16 pfa_len, pfa_ptr;
274 	u16 next_tlv;
275 
276 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
277 	if (status) {
278 		ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
279 		return status;
280 	}
281 	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
282 	if (status) {
283 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
284 		return status;
285 	}
286 	/* Starting with first TLV after PFA length, iterate through the list
287 	 * of TLVs to find the requested one.
288 	 */
289 	next_tlv = pfa_ptr + 1;
290 	while (next_tlv < pfa_ptr + pfa_len) {
291 		u16 tlv_sub_module_type;
292 		u16 tlv_len;
293 
294 		/* Read TLV type */
295 		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
296 		if (status) {
297 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
298 			break;
299 		}
300 		/* Read TLV length */
301 		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
302 		if (status) {
303 			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
304 			break;
305 		}
306 		if (tlv_sub_module_type == module_type) {
307 			if (tlv_len) {
308 				*module_tlv = next_tlv;
309 				*module_tlv_len = tlv_len;
310 				return 0;
311 			}
312 			return ICE_ERR_INVAL_SIZE;
313 		}
314 		/* Check next TLV, i.e. current TLV pointer + length + 2 words
315 		 * (for current TLV's type and length)
316 		 */
317 		next_tlv = next_tlv + tlv_len + 2;
318 	}
319 	/* Module does not exist */
320 	return ICE_ERR_DOES_NOT_EXIST;
321 }
322 
323 /**
324  * ice_read_pba_string - Reads part number string from NVM
325  * @hw: pointer to hardware structure
326  * @pba_num: stores the part number string from the NVM
327  * @pba_num_size: part number string buffer length
328  *
329  * Reads the part number string from the NVM.
330  */
331 enum ice_status
332 ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
333 {
334 	u16 pba_tlv, pba_tlv_len;
335 	enum ice_status status;
336 	u16 pba_word, pba_size;
337 	u16 i;
338 
339 	status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
340 					ICE_SR_PBA_BLOCK_PTR);
341 	if (status) {
342 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
343 		return status;
344 	}
345 
346 	/* pba_size is the next word */
347 	status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
348 	if (status) {
349 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
350 		return status;
351 	}
352 
353 	if (pba_tlv_len < pba_size) {
354 		ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
355 		return ICE_ERR_INVAL_SIZE;
356 	}
357 
358 	/* Subtract one to get PBA word count (PBA Size word is included in
359 	 * total size)
360 	 */
361 	pba_size--;
362 	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
363 		ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
364 		return ICE_ERR_PARAM;
365 	}
366 
367 	for (i = 0; i < pba_size; i++) {
368 		status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
369 		if (status) {
370 			ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
371 			return status;
372 		}
373 
374 		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
375 		pba_num[(i * 2) + 1] = pba_word & 0xFF;
376 	}
377 	pba_num[(pba_size * 2)] = '\0';
378 
379 	return status;
380 }
381 
382 /**
383  * ice_get_orom_ver_info - Read Option ROM version information
384  * @hw: pointer to the HW struct
385  *
386  * Read the Combo Image version data from the Boot Configuration TLV and fill
387  * in the option ROM version data.
388  */
389 static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
390 {
391 	u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
392 	struct ice_orom_info *orom = &hw->nvm.orom;
393 	enum ice_status status;
394 	u32 combo_ver;
395 
396 	status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
397 					ICE_SR_BOOT_CFG_PTR);
398 	if (status) {
399 		ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
400 		return status;
401 	}
402 
403 	/* Boot Configuration Block must have length at least 2 words
404 	 * (Combo Image Version High and Combo Image Version Low)
405 	 */
406 	if (boot_cfg_tlv_len < 2) {
407 		ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
408 		return ICE_ERR_INVAL_SIZE;
409 	}
410 
411 	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
412 				  &combo_hi);
413 	if (status) {
414 		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
415 		return status;
416 	}
417 
418 	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
419 				  &combo_lo);
420 	if (status) {
421 		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
422 		return status;
423 	}
424 
425 	combo_ver = ((u32)combo_hi << 16) | combo_lo;
426 
427 	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
428 			   ICE_OROM_VER_SHIFT);
429 	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
430 	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
431 			    ICE_OROM_VER_BUILD_SHIFT);
432 
433 	return 0;
434 }
435 
/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 *
 * Get the netlist version information
 *
 * Acquires NVM ownership, reads the netlist ID block from the link topology
 * netlist module, and fills hw->netlist_ver. Returns 0 on success or a
 * status code on failure; note a zero-length module also returns 0 without
 * filling the version fields.
 */
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
	enum ice_status ret;
	u32 id_blk_start;
	__le16 raw_data;
	u16 data, i;
	u16 *buff;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;
	buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
		       GFP_KERNEL);
	if (!buff) {
		ret = ICE_ERR_NO_MEMORY;
		goto exit_no_mem;
	}

	/* read module length (module offsets are in words; the *2 converts
	 * to the byte offset ice_aq_read_nvm expects)
	 */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;

	data = le16_to_cpu(raw_data);
	/* exit if length is = 0 (no netlist present; ret stays 0) */
	if (!data)
		goto exit_error;

	/* read node count */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;
	data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;

	/* netlist ID block starts from offset 4 + node count * 2 */
	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

	/* read the entire netlist ID block (id_blk_start is a word offset,
	 * hence the *2 byte conversion)
	 */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      id_blk_start * 2,
			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
			      false, NULL);
	if (ret)
		goto exit_error;

	/* Convert the raw little-endian block to CPU order in place */
	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
		buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);

	/* Each 32-bit field is split across a high and a low 16-bit word */
	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
	kfree(buff);
exit_no_mem:
	ice_release_nvm(hw);
	return ret;
}
516 
/**
 * ice_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
	/* Bisection window: offsets that read successfully raise min_size,
	 * offsets rejected by firmware lower max_size, until they meet.
	 */
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		/* Probe a single byte at the midpoint of the window */
		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			/* Firmware rejected the offset as invalid: treat it
			 * as past the end of flash and shrink the window.
			 */
			ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = 0;
			max_size = offset;
		} else if (!status) {
			/* Read succeeded: flash extends at least this far */
			ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);

	hw->nvm.flash_size = max_size;

err_read_flat_nvm:
	/* Release ownership on both the success and the error path */
	ice_release_nvm(hw);

	return status;
}
565 
/**
 * ice_init_nvm - initializes NVM setting
 * @hw: pointer to the HW struct
 *
 * This function reads and populates NVM settings such as Shadow RAM size,
 * max_timeout, and blank_nvm_mode
 */
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
	struct ice_nvm_info *nvm = &hw->nvm;
	u16 eetrack_lo, eetrack_hi, ver;
	enum ice_status status;
	u32 fla, gens_stat;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* Switching to words (sr_size contains power of 2) */
	nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
		nvm->blank_nvm_mode = false;
	} else {
		/* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	/* Cache the device starter (major.minor) version from Shadow RAM */
	status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
		return status;
	}
	nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	/* The EETRACK id is stored as two consecutive 16-bit words */
	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
		return status;
	}

	status = ice_get_orom_ver_info(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
		return status;
	}

	/* read the netlist version information; a failure here is logged
	 * but deliberately does not fail NVM initialization
	 */
	status = ice_get_netlist_ver_info(hw);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return 0;
}
641 
642 /**
643  * ice_nvm_validate_checksum
644  * @hw: pointer to the HW struct
645  *
646  * Verify NVM PFA checksum validity (0x0706)
647  */
648 enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
649 {
650 	struct ice_aqc_nvm_checksum *cmd;
651 	struct ice_aq_desc desc;
652 	enum ice_status status;
653 
654 	status = ice_acquire_nvm(hw, ICE_RES_READ);
655 	if (status)
656 		return status;
657 
658 	cmd = &desc.params.nvm_checksum;
659 
660 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
661 	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
662 
663 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
664 	ice_release_nvm(hw);
665 
666 	if (!status)
667 		if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
668 			status = ICE_ERR_NVM_CHECKSUM;
669 
670 	return status;
671 }
672 
673 /**
674  * ice_nvm_write_activate
675  * @hw: pointer to the HW struct
676  * @cmd_flags: NVM activate admin command bits (banks to be validated)
677  *
678  * Update the control word with the required banks' validity bits
679  * and dumps the Shadow RAM to flash (0x0707)
680  */
681 enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
682 {
683 	struct ice_aqc_nvm *cmd;
684 	struct ice_aq_desc desc;
685 
686 	cmd = &desc.params.nvm;
687 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
688 
689 	cmd->cmd_flags = cmd_flags;
690 
691 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
692 }
693 
694 /**
695  * ice_aq_nvm_update_empr
696  * @hw: pointer to the HW struct
697  *
698  * Update empr (0x0709). This command allows SW to
699  * request an EMPR to activate new FW.
700  */
701 enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
702 {
703 	struct ice_aq_desc desc;
704 
705 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
706 
707 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
708 }
709 
710 /* ice_nvm_set_pkg_data
711  * @hw: pointer to the HW struct
712  * @del_pkg_data_flag: If is set then the current pkg_data store by FW
713  *		       is deleted.
714  *		       If bit is set to 1, then buffer should be size 0.
715  * @data: pointer to buffer
716  * @length: length of the buffer
717  * @cd: pointer to command details structure or NULL
718  *
719  * Set package data (0x070A). This command is equivalent to the reception
720  * of a PLDM FW Update GetPackageData cmd. This command should be sent
721  * as part of the NVM update as the first cmd in the flow.
722  */
723 
724 enum ice_status
725 ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
726 		     u16 length, struct ice_sq_cd *cd)
727 {
728 	struct ice_aqc_nvm_pkg_data *cmd;
729 	struct ice_aq_desc desc;
730 
731 	if (length != 0 && !data)
732 		return ICE_ERR_PARAM;
733 
734 	cmd = &desc.params.pkg_data;
735 
736 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
737 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
738 
739 	if (del_pkg_data_flag)
740 		cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
741 
742 	return ice_aq_send_cmd(hw, &desc, data, length, cd);
743 }
744 
745 /* ice_nvm_pass_component_tbl
746  * @hw: pointer to the HW struct
747  * @data: pointer to buffer
748  * @length: length of the buffer
749  * @transfer_flag: parameter for determining stage of the update
750  * @comp_response: a pointer to the response from the 0x070B AQC.
751  * @comp_response_code: a pointer to the response code from the 0x070B AQC.
752  * @cd: pointer to command details structure or NULL
753  *
754  * Pass component table (0x070B). This command is equivalent to the reception
755  * of a PLDM FW Update PassComponentTable cmd. This command should be sent once
756  * per component. It can be only sent after Set Package Data cmd and before
757  * actual update. FW will assume these commands are going to be sent until
758  * the TransferFlag is set to End or StartAndEnd.
759  */
760 
761 enum ice_status
762 ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
763 			   u8 transfer_flag, u8 *comp_response,
764 			   u8 *comp_response_code, struct ice_sq_cd *cd)
765 {
766 	struct ice_aqc_nvm_pass_comp_tbl *cmd;
767 	struct ice_aq_desc desc;
768 	enum ice_status status;
769 
770 	if (!data || !comp_response || !comp_response_code)
771 		return ICE_ERR_PARAM;
772 
773 	cmd = &desc.params.pass_comp_tbl;
774 
775 	ice_fill_dflt_direct_cmd_desc(&desc,
776 				      ice_aqc_opc_nvm_pass_component_tbl);
777 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
778 
779 	cmd->transfer_flag = transfer_flag;
780 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
781 
782 	if (!status) {
783 		*comp_response = cmd->component_response;
784 		*comp_response_code = cmd->component_response_code;
785 	}
786 	return status;
787 }
788