// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
#include <linux/packing.h>

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s:   bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s:   bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830CC_BACKPLANE:
	case ICE_DEV_ID_E830CC_QSFP56:
	case ICE_DEV_ID_E830CC_SFP:
	case ICE_DEV_ID_E830CC_SFP_DD:
	case ICE_DEV_ID_E830C_BACKPLANE:
	case ICE_DEV_ID_E830_XXV_BACKPLANE:
	case ICE_DEV_ID_E830C_QSFP:
	case ICE_DEV_ID_E830_XXV_QSFP:
	case ICE_DEV_ID_E830C_SFP:
	case ICE_DEV_ID_E830_XXV_SFP:
	case ICE_DEV_ID_E835CC_BACKPLANE:
	case ICE_DEV_ID_E835CC_QSFP56:
	case ICE_DEV_ID_E835CC_SFP:
	case ICE_DEV_ID_E835C_BACKPLANE:
	case ICE_DEV_ID_E835C_QSFP:
	case ICE_DEV_ID_E835C_SFP:
	case ICE_DEV_ID_E835_L_BACKPLANE:
	case ICE_DEV_ID_E835_L_QSFP:
	case ICE_DEV_ID_E835_L_SFP:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct libie_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function returns the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The reported addresses are also stored in the
 * HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct libie_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = libie_aq_raw(&desc);

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
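
/* Illustrative sketch (not part of the driver flow): callers size the buffer
 * for both possible (LAN and WoL) address records, as ice_init_hw() below
 * does. 'mac' here is a hypothetical destination:
 *
 *	u8 buf[2 * sizeof(struct ice_aqc_manage_mac_read_resp)];
 *	u8 mac[ETH_ALEN];
 *	int err;
 *
 *	err = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *	if (!err)
 *		ether_addr_copy(mac, hw->port_info->mac.lan_addr);
 */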

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct libie_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = libie_aq_raw(&desc);

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
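
/* Illustrative sketch: querying the media-based topology capabilities, as
 * ice_init_hw() below does. The response is heap-allocated because it is
 * comparatively large:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int err;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return -ENOMEM;
 *	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				  pcaps, NULL);
 *	kfree(pcaps);
 */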

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * The cage node type can be used to determine whether a cage is present. If
 * the AQC returns an error (ENOENT), no cage is present, and the connection
 * type is then backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct libie_aq_desc desc;

	cmd = libie_aq_raw(&desc);

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aqc_get_link_topo *resp;
	struct libie_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	resp = libie_aq_raw(&desc);
	*resp = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle = le16_to_cpu(resp->addr.handle);
	if (node_part_number)
		*node_part_number = resp->node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type: type of netlist node to look for
 * @ctx: context of the search
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Return:
 * * 0 if the node is found,
 * * -ENOENT if no handle was found,
 * * negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
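
/* Illustrative sketch: looking up a node in the port context. Here node_type
 * and part_number stand in for real netlist constants:
 *
 *	u16 handle;
 *	int err;
 *
 *	err = ice_find_netlist_node(hw, node_type,
 *				    ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
 *				    part_number, &handle);
 *
 * A -ENOENT return (node absent) is typically treated as a soft failure,
 * while any other error indicates an AQ problem.
 */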

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	struct libie_aq_desc desc;
	bool tx_pause, rx_pause;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = libie_aq_raw(&desc);
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
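
/* Illustrative sketch: refreshing link state while (re)arming Link Status
 * Event reporting, similar to the driver's link handling paths:
 *
 *	struct ice_link_status link;
 *	int err;
 *
 *	err = ice_aq_get_link_info(pi, true, &link, NULL);
 *	if (!err && (link.link_info & ICE_AQ_LINK_UP))
 *		...		(link is up at link.link_speed)
 */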

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct libie_aq_desc desc;

	cmd = libie_aq_raw(&desc);

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
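
/* Illustrative use: ice_init_hw() below enables jumbo frame support at the
 * MAC level with:
 *
 *	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */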

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	/* Initialize recipe count with default recipes read from NVM */
	sw->recp_cnt = ICE_SW_LKUP_LAST;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_wait_fw_load - wait for PHY firmware loading to complete
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out, 0 to bypass waiting
 *
 * Return:
 * * 0 on success
 * * negative on timeout
 */
static int ice_wait_fw_load(struct ice_hw *hw, u32 timeout)
{
	int fw_loading_reg;

	if (!timeout)
		return 0;

	fw_loading_reg = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
	/* notify the user only once if PHY FW is still loading */
	if (fw_loading_reg)
		dev_info(ice_hw_to_dev(hw), "Link initialization is blocked by PHY FW initialization. Link initialization will continue after PHY FW initialization completes.\n");
	else
		return 0;

	return rd32_poll_timeout(hw, GL_MNG_FWSM, fw_loading_reg,
				 !(fw_loading_reg & GL_MNG_FWSM_FW_LOADING_M),
				 10000, timeout * 1000);
}

static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
			    u16 size)
{
	struct ice_hw *hw = priv;

	return ice_aq_send_cmd(hw, desc, buf, size, NULL);
}

static int __fwlog_init(struct ice_hw *hw)
{
	struct ice_pf *pf = hw->back;
	struct libie_fwlog_api api = {
		.pdev = pf->pdev,
		.send_cmd = __fwlog_send_cmd,
		.priv = hw,
	};
	int err;

	/* only support fw log commands on PF 0 */
	if (hw->bus.func)
		return -EINVAL;

	err = ice_debugfs_pf_init(pf);
	if (err)
		return err;

	api.debugfs_root = pf->ice_debugfs_pf;

	return libie_fwlog_init(&hw->fwlog, &api);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = __fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_dev_hw(hw->back);

	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	/* Some cards require longer initialization times
	 * due to necessity of loading FW from an external source.
	 * This can take even half a minute.
	 */
	status = ice_wait_fw_load(hw, 30000);
	if (status) {
		dev_err(ice_hw_to_dev(hw), "ice_wait_fw_load timed out\n");
		goto err_unroll_fltr_mgmt_struct;
	}

	hw->lane_num = ice_get_phy_lane_number(hw);

	return 0;
err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

static void __fwlog_deinit(struct ice_hw *hw)
{
	/* only support fw log commands on PF 0 */
	if (hw->bus.func)
		return;

	ice_debugfs_pf_deinit(hw->back);
	libie_fwlog_deinit(&hw->fwlog);
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);
	__fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
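
/* Illustrative pairing (sketch): ice_init_hw() and ice_deinit_hw() bracket
 * nominal operation from the driver's probe and remove paths:
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;
 *	...
 *	ice_deinit_hw(hw);
 */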

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
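
/* Illustrative sketch: per the note above, any reset other than a PFR
 * restores PXE mode, which must be cleared again once the AQ interface is
 * rebuilt:
 *
 *	err = ice_reset(hw, ICE_RESET_CORER);
 *	...		(rebuild control queues, then:)
 *	ice_clear_pxe_mode(hw);
 */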

/**
 * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
				   const ice_rxq_ctx_buf_t *rxq_ctx,
				   u32 rxq_index)
{
	/* Copy each dword separately to HW */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		u32 ctx = ((const u32 *)rxq_ctx)[i];

		wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
	}
}

/**
 * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
				     ice_rxq_ctx_buf_t *rxq_ctx,
				     u32 rxq_index)
{
	u32 *ctx = (u32 *)rxq_ctx;

	/* Copy each dword separately from HW */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) {
		*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
	}
}

#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
	PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
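
/* For example, the first Rx queue entry below,
 * ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), expands to
 * PACKED_FIELD(12, 0, struct ice_rlan_ctx, head): bits 12:0 of the packed
 * buffer carry the 13-bit 'head' field.
 */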

/* LAN Rx Queue Context */
static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
				 /* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
};

/**
 * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
 * @ctx: the Rx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
			     ice_rxq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

/**
 * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
 * @buf: the HW buffer to unpack from
 * @ctx: the Rx queue context to unpack
 *
 * Unpack the Rx queue context from the HW buffer into the CPU-friendly
 * structure.
 */
static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
			       struct ice_rlan_ctx *ctx)
{
	unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
		      QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

/**
 * ice_write_rxq_ctx - Write Rx Queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the unpacked Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Pack the sparse Rx Queue context into dense hardware format and write it
 * into the HW register space.
 *
 * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	ice_rxq_ctx_buf_t buf = {};

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	ice_pack_rxq_ctx(rlan_ctx, &buf);
	ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);

	return 0;
}
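
/* Illustrative sketch (a minimal subset of what the Rx queue setup path
 * programs; the right-hand sides are placeholders, not recommended values):
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *	int err;
 *
 *	rlan_ctx.base = ring_dma >> ICE_RLAN_BASE_S;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = buf_len >> ICE_RLAN_CTX_DBUF_S;
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */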

/**
 * ice_read_rxq_ctx - Read Rx queue context from HW
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Read the Rx queue context from the hardware registers, and unpack it into
 * the sparse Rx queue context structure.
 *
 * Returns: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */
int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		     u32 rxq_index)
{
	ice_rxq_ctx_buf_t buf = {};

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index);
	ice_unpack_rxq_ctx(&buf, rlan_ctx);

	return 0;
}

/* LAN Tx Queue Context */
static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
};

/**
 * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
 * @ctx: the Tx queue context to pack
 * @buf: the Admin Queue HW buffer to pack into
 *
 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed Admin Queue layout.
 */
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
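
/* Illustrative sketch: the Tx queue setup path packs the context straight
 * into the Add Tx LAN Queues AQ buffer (assuming a queue group buffer laid
 * out as in ice_base.c):
 *
 *	ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
 */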
1534 
1535 /**
1536  * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
1537  * @ctx: the Tx queue context to pack
1538  * @buf: the HW buffer to pack into
1539  *
1540  * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
1541  * bit-packed HW layout, including the internal data portion.
1542  */
1543 static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
1544 				  ice_txq_ctx_buf_full_t *buf)
1545 {
1546 	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
1547 		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
1548 }
1549 
1550 /**
1551  * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
1552  * @buf: the HW buffer to unpack from
1553  * @ctx: the Tx queue context to unpack
1554  *
1555  * Unpack the Tx queue context from the HW buffer (including the full internal
1556  * state) into the CPU-friendly structure.
1557  */
1558 static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
1559 				    struct ice_tlan_ctx *ctx)
1560 {
1561 	unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
1562 		      QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
1563 }
1564 
1565 /**
1566  * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
1567  * @hw: pointer to the hardware structure
1568  * @txq_ctx: pointer to the packed Tx queue context, including internal state
1569  * @txq_index: the index of the Tx queue
1570  *
1571  * Copy Tx Queue context from HW register space to dense structure
1572  */
1573 static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
1574 				     ice_txq_ctx_buf_full_t *txq_ctx,
1575 				     u32 txq_index)
1576 {
1577 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
1578 	u32 *ctx = (u32 *)txq_ctx;
1579 	u32 txq_base, reg;
1580 
1581 	/* Get Tx queue base within card space */
1582 	txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
1583 	txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
1584 
1585 	reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
1586 			 GLCOMM_QTX_CNTX_CTL_CMD_READ) |
1587 	      FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
1588 			 txq_base + txq_index) |
1589 	      GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
1590 
1591 	/* Prevent other PFs on the same adapter from accessing the Tx queue
1592 	 * context interface concurrently.
1593 	 */
1594 	spin_lock(&pf->adapter->txq_ctx_lock);
1595 
1596 	wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
1597 	ice_flush(hw);
1598 
1599 	/* Copy each dword separately from HW */
1600 	for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
1601 		*ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
1602 
1603 		ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
1604 	}
1605 
1606 	spin_unlock(&pf->adapter->txq_ctx_lock);
1607 }
1608 
1609 /**
1610  * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
1611  * @hw: pointer to the hardware structure
1612  * @txq_ctx: pointer to the packed Tx queue context, including internal state
1613  * @txq_index: the index of the Tx queue
1614  */
1615 static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
1616 				   const ice_txq_ctx_buf_full_t *txq_ctx,
1617 				   u32 txq_index)
1618 {
1619 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
1620 	u32 txq_base, reg;
1621 
1622 	/* Get Tx queue base within card space */
1623 	txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
1624 	txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
1625 
1626 	reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
1627 			 GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
1628 	      FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
1629 			 txq_base + txq_index) |
1630 	      GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
1631 
1632 	/* Prevent other PFs on the same adapter from accessing the Tx queue
1633 	 * context interface concurrently.
1634 	 */
1635 	spin_lock(&pf->adapter->txq_ctx_lock);
1636 
1637 	/* Copy each dword separately to HW */
1638 	for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
1639 		u32 ctx = ((const u32 *)txq_ctx)[i];
1640 
1641 		wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
1642 
1643 		ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
1644 	}
1645 
1646 	wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
1647 	ice_flush(hw);
1648 
1649 	spin_unlock(&pf->adapter->txq_ctx_lock);
1650 }
1651 
1652 /**
1653  * ice_read_txq_ctx - Read Tx queue context from HW
1654  * @hw: pointer to the hardware structure
1655  * @tlan_ctx: pointer to the Tx queue context
1656  * @txq_index: the index of the Tx queue
1657  *
1658  * Read the Tx queue context from the HW registers, then unpack it into the
1659  * ice_tlan_ctx structure for use.
1660  *
1661  * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
1662  */
1663 int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
1664 		     u32 txq_index)
1665 {
1666 	ice_txq_ctx_buf_full_t buf = {};
1667 
1668 	if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
1669 		return -EINVAL;
1670 
1671 	ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
1672 	ice_unpack_txq_ctx_full(&buf, tlan_ctx);
1673 
1674 	return 0;
1675 }
1676 
1677 /**
1678  * ice_write_txq_ctx - Write Tx queue context to HW
1679  * @hw: pointer to the hardware structure
1680  * @tlan_ctx: pointer to the Tx queue context
1681  * @txq_index: the index of the Tx queue
1682  *
1683  * Pack the Tx queue context into the dense HW layout, then write it into the
1684  * HW registers.
1685  *
1686  * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
1687  */
1688 int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
1689 		      u32 txq_index)
1690 {
1691 	ice_txq_ctx_buf_full_t buf = {};
1692 
1693 	if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
1694 		return -EINVAL;
1695 
1696 	ice_pack_txq_ctx_full(tlan_ctx, &buf);
1697 	ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
1698 
1699 	return 0;
1700 }
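
/* Example (sketch): a read-modify-write of a Tx queue context using the two
 * helpers above. The queue index and the field being changed are
 * illustrative only; error handling is trimmed for brevity.
 *
 *	struct ice_tlan_ctx tlan_ctx = {};
 *	int err;
 *
 *	err = ice_read_txq_ctx(hw, &tlan_ctx, txq_index);
 *	if (err)
 *		return err;
 *
 *	tlan_ctx.qlen = new_qlen;	// hypothetical new queue length
 *
 *	err = ice_write_txq_ctx(hw, &tlan_ctx, txq_index);
 */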
1701 
1702 /* Tx time Queue Context */
1703 static const struct packed_field_u8 ice_txtime_ctx_fields[] = {
1704 				    /* Field			Width	LSB */
1705 	ICE_CTX_STORE(ice_txtime_ctx, base,			57,	0),
1706 	ICE_CTX_STORE(ice_txtime_ctx, pf_num,			3,	57),
1707 	ICE_CTX_STORE(ice_txtime_ctx, vmvf_num,			10,	60),
1708 	ICE_CTX_STORE(ice_txtime_ctx, vmvf_type,		2,	70),
1709 	ICE_CTX_STORE(ice_txtime_ctx, src_vsi,			10,	72),
1710 	ICE_CTX_STORE(ice_txtime_ctx, cpuid,			8,	82),
1711 	ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc,		1,	90),
1712 	ICE_CTX_STORE(ice_txtime_ctx, qlen,			13,	91),
1713 	ICE_CTX_STORE(ice_txtime_ctx, timer_num,		1,	104),
1714 	ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q,		1,	105),
1715 	ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32,		1,	106),
1716 	ICE_CTX_STORE(ice_txtime_ctx, ts_res,			4,	107),
1717 	ICE_CTX_STORE(ice_txtime_ctx, ts_round_type,		2,	111),
1718 	ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot,		3,	113),
1719 	ICE_CTX_STORE(ice_txtime_ctx, merging_ena,		1,	116),
1720 	ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id,		4,	117),
1721 	ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4,	121),
1722 	ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode,	1,	125),
1723 };
1724 
1725 /**
1726  * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer
1727  * @ctx: the Tx time queue context to pack
1728  * @buf: the HW buffer to pack into
1729  *
1730  * Pack the Tx time queue context from the CPU-friendly unpacked buffer into
1731  * its bit-packed HW layout.
1732  */
1733 void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
1734 			 ice_txtime_ctx_buf_t *buf)
1735 {
1736 	pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields,
1737 		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
1738 }
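
/* Example (sketch): packing an unpacked Tx time queue context into its
 * dense HW layout. The field values are illustrative only.
 *
 *	ice_txtime_ctx_buf_t buf = {};
 *	struct ice_txtime_ctx ctx = {};
 *
 *	ctx.qlen = ring_count;		// hypothetical ring size
 *	ctx.txtime_ena_q = 1;
 *	ice_pack_txtime_ctx(&ctx, &buf);
 *	// buf now holds the bit-packed image described by
 *	// ice_txtime_ctx_fields and can then be programmed into HW.
 */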
1739 
1740 /* Sideband Queue command wrappers */
1741 
1742 /**
1743  * ice_sbq_send_cmd - send a command to the Sideband Queue
1744  * @hw: pointer to the HW struct
1745  * @desc: descriptor describing the command
1746  * @buf: buffer to use for indirect commands (NULL for direct commands)
1747  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1748  * @cd: pointer to command details structure
1749  */
1750 static int
1751 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1752 		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1753 {
1754 	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
1755 			       (struct libie_aq_desc *)desc, buf, buf_size, cd);
1756 }
1757 
1758 /**
1759  * ice_sbq_rw_reg - fill and send a Sideband Queue register read/write command
1760  * @hw: pointer to the HW struct
1761  * @in: message info to be filled in descriptor
1762  * @flags: control queue descriptor flags
1763  */
1764 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
1765 {
1766 	struct ice_sbq_cmd_desc desc = {0};
1767 	struct ice_sbq_msg_req msg = {0};
1768 	u16 msg_len;
1769 	int status;
1770 
1771 	msg_len = sizeof(msg);
1772 
1773 	msg.dest_dev = in->dest_dev;
1774 	msg.opcode = in->opcode;
1775 	msg.flags = ICE_SBQ_MSG_FLAGS;
1776 	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1777 	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1778 	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1779 
1780 	if (in->opcode)
1781 		msg.data = cpu_to_le32(in->data);
1782 	else
1783 		/* data read comes back in completion, so shorten the struct by
1784 		 * sizeof(msg.data)
1785 		 */
1786 		msg_len -= sizeof(msg.data);
1787 
1788 	desc.flags = cpu_to_le16(flags);
1789 	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1790 	desc.param0.cmd_len = cpu_to_le16(msg_len);
1791 	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1792 	if (!status && !in->opcode)
1793 		in->data = le32_to_cpu(((struct ice_sbq_msg_cmpl *)&msg)->data);
1795 	return status;
1796 }
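
/* Example (sketch): reading a remote device register over the sideband
 * queue. The destination device and address are illustrative; an opcode of
 * 0 requests a read, so the result is returned through in.data.
 *
 *	struct ice_sbq_msg_input in = {
 *		.dest_dev = dest,			// illustrative device
 *		.opcode = 0,				// 0 = read
 *		.msg_addr_low = lower_16_bits(addr),
 *		.msg_addr_high = upper_16_bits(addr),
 *	};
 *	int err = ice_sbq_rw_reg(hw, &in, LIBIE_AQ_FLAG_RD);
 *
 *	if (!err)
 *		val = in.data;		// completion data for reads
 */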
1797 
1798 /* FW Admin Queue command wrappers */
1799 
1800 /* Software mutex that is held while software owns the firmware's Global
1801  * Config Lock, to prevent most (but not all) types of AQ commands from
1802  * being sent to FW in the meantime.
1803  */
1804 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1805 
1806 /**
1807  * ice_should_retry_sq_send_cmd - check if an ATQ send should be retried
1808  * @opcode: AQ opcode
1809  *
1810  * Decide if we should retry the send command routine for the ATQ, depending
1811  * on the opcode.
1812  */
1813 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1814 {
1815 	switch (opcode) {
1816 	case ice_aqc_opc_get_link_topo:
1817 	case ice_aqc_opc_lldp_stop:
1818 	case ice_aqc_opc_lldp_start:
1819 	case ice_aqc_opc_lldp_filter_ctrl:
1820 		return true;
1821 	}
1822 
1823 	return false;
1824 }
1825 
1826 /**
1827  * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1828  * @hw: pointer to the HW struct
1829  * @cq: pointer to the specific Control queue
1830  * @desc: prefilled descriptor describing the command
1831  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1832  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1833  * @cd: pointer to command details structure
1834  *
1835  * Retry sending the FW Admin Queue command, up to ICE_SQ_SEND_MAX_EXECUTE
1836  * times, if the EBUSY AQ error is returned.
1837  */
1838 static int
1839 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1840 		      struct libie_aq_desc *desc, void *buf, u16 buf_size,
1841 		      struct ice_sq_cd *cd)
1842 {
1843 	struct libie_aq_desc desc_cpy;
1844 	bool is_cmd_for_retry;
1845 	u8 idx = 0;
1846 	u16 opcode;
1847 	int status;
1848 
1849 	opcode = le16_to_cpu(desc->opcode);
1850 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1851 	memset(&desc_cpy, 0, sizeof(desc_cpy));
1852 
1853 	if (is_cmd_for_retry) {
1854 		/* All retryable cmds are direct, without buf. */
1855 		WARN_ON(buf);
1856 
1857 		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1858 	}
1859 
1860 	do {
1861 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1862 
1863 		if (!is_cmd_for_retry || !status ||
1864 		    hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY)
1865 			break;
1866 
1867 		memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1868 
1869 		msleep(ICE_SQ_SEND_DELAY_TIME_MS);
1870 
1871 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1872 
1873 	return status;
1874 }
1875 
1876 /**
1877  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1878  * @hw: pointer to the HW struct
1879  * @desc: descriptor describing the command
1880  * @buf: buffer to use for indirect commands (NULL for direct commands)
1881  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1882  * @cd: pointer to command details structure
1883  *
1884  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1885  */
1886 int
1887 ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc, void *buf,
1888 		u16 buf_size, struct ice_sq_cd *cd)
1889 {
1890 	struct libie_aqc_req_res *cmd = libie_aq_raw(desc);
1891 	bool lock_acquired = false;
1892 	int status;
1893 
1894 	/* When a package download is in process (i.e. when the firmware's
1895 	 * Global Configuration Lock resource is held), only the Download
1896 	 * Package, Get Version, Get Package Info List, Upload Section,
1897 	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
1898 	 * Set/Get Tx Topology, Add Recipe, Set Recipes to Profile Association,
1899 	 * Get Recipe, Get Recipes to Profile Association, and Release Resource
1900 	 * (with resource ID set to Global Config Lock) AdminQ commands are
1901 	 * allowed; all others must block until the package download completes
1902 	 * and the Global Config Lock is released. See also ice_acquire_global_cfg_lock().
1903 	 */
1904 	switch (le16_to_cpu(desc->opcode)) {
1905 	case ice_aqc_opc_download_pkg:
1906 	case ice_aqc_opc_get_pkg_info_list:
1907 	case ice_aqc_opc_get_ver:
1908 	case ice_aqc_opc_upload_section:
1909 	case ice_aqc_opc_update_pkg:
1910 	case ice_aqc_opc_set_port_params:
1911 	case ice_aqc_opc_get_vlan_mode_parameters:
1912 	case ice_aqc_opc_set_vlan_mode_parameters:
1913 	case ice_aqc_opc_set_tx_topo:
1914 	case ice_aqc_opc_get_tx_topo:
1915 	case ice_aqc_opc_add_recipe:
1916 	case ice_aqc_opc_recipe_to_profile:
1917 	case ice_aqc_opc_get_recipe:
1918 	case ice_aqc_opc_get_recipe_to_profile:
1919 		break;
1920 	case ice_aqc_opc_release_res:
1921 		if (le16_to_cpu(cmd->res_id) == LIBIE_AQC_RES_ID_GLBL_LOCK)
1922 			break;
1923 		fallthrough;
1924 	default:
1925 		mutex_lock(&ice_global_cfg_lock_sw);
1926 		lock_acquired = true;
1927 		break;
1928 	}
1929 
1930 	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1931 	if (lock_acquired)
1932 		mutex_unlock(&ice_global_cfg_lock_sw);
1933 
1934 	return status;
1935 }
1936 
1937 /**
1938  * ice_aq_get_fw_ver - get the firmware version
1939  * @hw: pointer to the HW struct
1940  * @cd: pointer to command details structure or NULL
1941  *
1942  * Get the firmware version (0x0001) from the admin queue commands
1943  */
1944 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1945 {
1946 	struct libie_aqc_get_ver *resp;
1947 	struct libie_aq_desc desc;
1948 	int status;
1949 
1950 	resp = &desc.params.get_ver;
1951 
1952 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1953 
1954 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1955 
1956 	if (!status) {
1957 		hw->fw_branch = resp->fw_branch;
1958 		hw->fw_maj_ver = resp->fw_major;
1959 		hw->fw_min_ver = resp->fw_minor;
1960 		hw->fw_patch = resp->fw_patch;
1961 		hw->fw_build = le32_to_cpu(resp->fw_build);
1962 		hw->api_branch = resp->api_branch;
1963 		hw->api_maj_ver = resp->api_major;
1964 		hw->api_min_ver = resp->api_minor;
1965 		hw->api_patch = resp->api_patch;
1966 	}
1967 
1968 	return status;
1969 }
1970 
1971 /**
1972  * ice_aq_send_driver_ver - send the driver version to firmware
1973  * @hw: pointer to the HW struct
1974  * @dv: driver's major, minor version
1975  * @cd: pointer to command details structure or NULL
1976  *
1977  * Send the driver version (0x0002) to the firmware
1978  */
1979 int
1980 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1981 		       struct ice_sq_cd *cd)
1982 {
1983 	struct libie_aqc_driver_ver *cmd;
1984 	struct libie_aq_desc desc;
1985 	u16 len;
1986 
1987 	cmd = &desc.params.driver_ver;
1988 
1989 	if (!dv)
1990 		return -EINVAL;
1991 
1992 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1993 
1994 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
1995 	cmd->major_ver = dv->major_ver;
1996 	cmd->minor_ver = dv->minor_ver;
1997 	cmd->build_ver = dv->build_ver;
1998 	cmd->subbuild_ver = dv->subbuild_ver;
1999 
2000 	len = 0;
2001 	while (len < sizeof(dv->driver_string) &&
2002 	       isascii(dv->driver_string[len]) && dv->driver_string[len])
2003 		len++;
2004 
2005 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
2006 }
2007 
2008 /**
2009  * ice_aq_q_shutdown - tell firmware the AdminQ is being shut down
2010  * @hw: pointer to the HW struct
2011  * @unloading: is the driver unloading itself
2012  *
2013  * Tell the Firmware that we're shutting down the AdminQ and whether
2014  * or not the driver is unloading as well (0x0003).
2015  */
2016 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
2017 {
2018 	struct ice_aqc_q_shutdown *cmd;
2019 	struct libie_aq_desc desc;
2020 
2021 	cmd = libie_aq_raw(&desc);
2022 
2023 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
2024 
2025 	if (unloading)
2026 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
2027 
2028 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2029 }
2030 
2031 /**
2032  * ice_aq_req_res - request a common resource
2033  * @hw: pointer to the HW struct
2034  * @res: resource ID
2035  * @access: access type
2036  * @sdp_number: resource number
2037  * @timeout: the maximum time in ms that the driver may hold the resource
2038  * @cd: pointer to command details structure or NULL
2039  *
2040  * Requests a common resource using the admin queue command (0x0008).
2041  * When attempting to acquire the Global Config Lock, the driver can
2042  * learn of three states:
2043  *  1) 0 -         acquired lock, and can perform download package
2044  *  2) -EIO -      did not get lock, driver should fail to load
2045  *  3) -EALREADY - did not get lock, but another driver has
2046  *                 successfully downloaded the package; the driver does
2047  *                 not have to download the package and can continue
2048  *                 loading
2049  *
2050  * Note that if the caller is in an acquire-lock, perform-action, release-lock
2051  * phase of operation, it is possible that the FW may detect a timeout and issue
2052  * a CORER. In this case, the driver will receive a CORER interrupt and will
2053  * have to determine its cause. The calling thread that is handling this flow
2054  * will likely get an error propagated back to it indicating the Download
2055  * Package, Update Package or the Release Resource AQ commands timed out.
2056  */
2057 static int
2058 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2059 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
2060 	       struct ice_sq_cd *cd)
2061 {
2062 	struct libie_aqc_req_res *cmd_resp;
2063 	struct libie_aq_desc desc;
2064 	int status;
2065 
2066 	cmd_resp = &desc.params.res_owner;
2067 
2068 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
2069 
2070 	cmd_resp->res_id = cpu_to_le16(res);
2071 	cmd_resp->access_type = cpu_to_le16(access);
2072 	cmd_resp->res_number = cpu_to_le32(sdp_number);
2073 	cmd_resp->timeout = cpu_to_le32(*timeout);
2074 	*timeout = 0;
2075 
2076 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2077 
2078 	/* The completion's Timeout field specifies the maximum time in ms
2079 	 * that the driver may hold the resource.
2080 	 */
2081 
2082 	/* Global config lock response utilizes an additional status field.
2083 	 *
2084 	 * If the Global config lock resource is held by some other driver, the
2085 	 * command completes with LIBIE_AQ_RES_GLBL_IN_PROG in the status field
2086 	 * and the timeout field indicates the maximum time the current owner
2087 	 * of the resource has to free it.
2088 	 */
2089 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
2090 		if (le16_to_cpu(cmd_resp->status) == LIBIE_AQ_RES_GLBL_SUCCESS) {
2091 			*timeout = le32_to_cpu(cmd_resp->timeout);
2092 			return 0;
2093 		} else if (le16_to_cpu(cmd_resp->status) ==
2094 			   LIBIE_AQ_RES_GLBL_IN_PROG) {
2095 			*timeout = le32_to_cpu(cmd_resp->timeout);
2096 			return -EIO;
2097 		} else if (le16_to_cpu(cmd_resp->status) ==
2098 			   LIBIE_AQ_RES_GLBL_DONE) {
2099 			return -EALREADY;
2100 		}
2101 
2102 		/* invalid FW response, force a timeout immediately */
2103 		*timeout = 0;
2104 		return -EIO;
2105 	}
2106 
2107 	/* If the resource is held by some other driver, the command completes
2108 	 * with a busy return value and the timeout field indicates the maximum
2109 	 * time the current owner of the resource has to free it.
2110 	 */
2111 	if (!status || hw->adminq.sq_last_status == LIBIE_AQ_RC_EBUSY)
2112 		*timeout = le32_to_cpu(cmd_resp->timeout);
2113 
2114 	return status;
2115 }
2116 
2117 /**
2118  * ice_aq_release_res - release a common resource
2119  * @hw: pointer to the HW struct
2120  * @res: resource ID
2121  * @sdp_number: resource number
2122  * @cd: pointer to command details structure or NULL
2123  *
2124  * Release a common resource using the admin queue command (0x0009).
2125  */
2126 static int
2127 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
2128 		   struct ice_sq_cd *cd)
2129 {
2130 	struct libie_aqc_req_res *cmd;
2131 	struct libie_aq_desc desc;
2132 
2133 	cmd = &desc.params.res_owner;
2134 
2135 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
2136 
2137 	cmd->res_id = cpu_to_le16(res);
2138 	cmd->res_number = cpu_to_le32(sdp_number);
2139 
2140 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2141 }
2142 
2143 /**
2144  * ice_acquire_res - acquire ownership of a resource
2145  * @hw: pointer to the HW structure
2146  * @res: resource ID
2147  * @access: access type (read or write)
2148  * @timeout: timeout in milliseconds
2149  *
2150  * This function will attempt to acquire the ownership of a resource.
2151  */
2152 int
2153 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2154 		enum ice_aq_res_access_type access, u32 timeout)
2155 {
2156 #define ICE_RES_POLLING_DELAY_MS	10
2157 	u32 delay = ICE_RES_POLLING_DELAY_MS;
2158 	u32 time_left = timeout;
2159 	int status;
2160 
2161 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2162 
2163 	/* A return code of -EALREADY means that another driver has
2164 	 * previously acquired the resource and performed any necessary updates;
2165 	 * in this case the caller does not obtain the resource and has no
2166 	 * further work to do.
2167 	 */
2168 	if (status == -EALREADY)
2169 		goto ice_acquire_res_exit;
2170 
2171 	if (status)
2172 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2173 
2174 	/* If necessary, poll until the current lock owner times out */
2175 	timeout = time_left;
2176 	while (status && timeout && time_left) {
2177 		mdelay(delay);
2178 		timeout = (timeout > delay) ? timeout - delay : 0;
2179 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2180 
2181 		if (status == -EALREADY)
2182 			/* lock free, but no work to do */
2183 			break;
2184 
2185 		if (!status)
2186 			/* lock acquired */
2187 			break;
2188 	}
2189 	if (status && status != -EALREADY)
2190 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2191 
2192 ice_acquire_res_exit:
2193 	if (status == -EALREADY) {
2194 		if (access == ICE_RES_WRITE)
2195 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2196 		else
2197 			ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
2198 	}
2199 	return status;
2200 }
2201 
2202 /**
2203  * ice_release_res - release an acquired resource
2204  * @hw: pointer to the HW structure
2205  * @res: resource ID
2206  *
2207  * This function will release a resource using the proper Admin Command.
2208  */
2209 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2210 {
2211 	unsigned long timeout;
2212 	int status;
2213 
2214 	/* In some rare cases, trying to release the resource results in an
2215 	 * admin queue timeout; handle this by retrying for a bounded time.
2216 	 */
2217 	timeout = jiffies + 10 * usecs_to_jiffies(ICE_CTL_Q_SQ_CMD_TIMEOUT);
2218 	do {
2219 		status = ice_aq_release_res(hw, res, 0, NULL);
2220 		if (status != -EIO)
2221 			break;
2222 		usleep_range(1000, 2000);
2223 	} while (time_before(jiffies, timeout));
2224 }
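
/* Example (sketch): the acquire/act/release pattern served by the two
 * functions above. The timeout is illustrative; -EALREADY is only expected
 * for the Global Config Lock, where it means another driver has already
 * downloaded the package.
 *
 *	err = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *			      timeout_ms);
 *	if (err == -EALREADY)
 *		return 0;		// lock free, no work to do
 *	if (err)
 *		return err;		// e.g. -EIO: lock not obtained
 *
 *	// ... perform the protected update ...
 *
 *	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 */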
2225 
2226 /**
2227  * ice_aq_alloc_free_res - command to allocate/free resources
2228  * @hw: pointer to the HW struct
2229  * @buf: Indirect buffer to hold data parameters and response
2230  * @buf_size: size of buffer for indirect commands
2231  * @opc: pass in the command opcode
2232  *
2233  * Helper function to allocate/free resources using the admin queue commands
2234  */
2235 int ice_aq_alloc_free_res(struct ice_hw *hw,
2236 			  struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2237 			  enum ice_adminq_opc opc)
2238 {
2239 	struct ice_aqc_alloc_free_res_cmd *cmd;
2240 	struct libie_aq_desc desc;
2241 
2242 	cmd = libie_aq_raw(&desc);
2243 
2244 	if (!buf || buf_size < flex_array_size(buf, elem, 1))
2245 		return -EINVAL;
2246 
2247 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2248 
2249 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
2250 
2251 	cmd->num_entries = cpu_to_le16(1);
2252 
2253 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
2254 }
2255 
2256 /**
2257  * ice_alloc_hw_res - allocate resource
2258  * @hw: pointer to the HW struct
2259  * @type: type of resource
2260  * @num: number of resources to allocate
2261  * @btm: allocate from bottom
2262  * @res: pointer to array that will receive the resources
2263  */
2264 int
2265 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2266 {
2267 	struct ice_aqc_alloc_free_res_elem *buf;
2268 	u16 buf_len;
2269 	int status;
2270 
2271 	buf_len = struct_size(buf, elem, num);
2272 	buf = kzalloc(buf_len, GFP_KERNEL);
2273 	if (!buf)
2274 		return -ENOMEM;
2275 
2276 	/* Prepare buffer to allocate resource. */
2277 	buf->num_elems = cpu_to_le16(num);
2278 	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2279 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2280 	if (btm)
2281 		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2282 
2283 	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
2284 	if (status)
2285 		goto ice_alloc_res_exit;
2286 
2287 	memcpy(res, buf->elem, sizeof(*buf->elem) * num);
2288 
2289 ice_alloc_res_exit:
2290 	kfree(buf);
2291 	return status;
2292 }
2293 
2294 /**
2295  * ice_free_hw_res - free allocated HW resource
2296  * @hw: pointer to the HW struct
2297  * @type: type of resource to free
2298  * @num: number of resources
2299  * @res: pointer to array that contains the resources to free
2300  */
2301 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2302 {
2303 	struct ice_aqc_alloc_free_res_elem *buf;
2304 	u16 buf_len;
2305 	int status;
2306 
2307 	buf_len = struct_size(buf, elem, num);
2308 	buf = kzalloc(buf_len, GFP_KERNEL);
2309 	if (!buf)
2310 		return -ENOMEM;
2311 
2312 	/* Prepare buffer to free resource. */
2313 	buf->num_elems = cpu_to_le16(num);
2314 	buf->res_type = cpu_to_le16(type);
2315 	memcpy(buf->elem, res, sizeof(*buf->elem) * num);
2316 
2317 	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
2318 	if (status)
2319 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2320 
2321 	kfree(buf);
2322 	return status;
2323 }
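
/* Example (sketch): allocating one HW resource and freeing it again with
 * the wrappers above. The resource type is illustrative.
 *
 *	u16 item;
 *	int err;
 *
 *	err = ice_alloc_hw_res(hw, type, 1, false, &item);
 *	if (err)
 *		return err;
 *	// ... use item ...
 *	ice_free_hw_res(hw, type, 1, &item);
 */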
2324 
2325 /**
2326  * ice_get_num_per_func - determine number of resources per PF
2327  * @hw: pointer to the HW structure
2328  * @max: value to be evenly split between each PF
2329  *
2330  * Determine the number of valid functions by going through the bitmap returned
2331  * from parsing capabilities and use this to calculate the number of resources
2332  * per PF based on the max value passed in.
2333  */
2334 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2335 {
2336 	u8 funcs;
2337 
2338 #define ICE_CAPS_VALID_FUNCS_M	0xFF
2339 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
2340 			 ICE_CAPS_VALID_FUNCS_M);
2341 
2342 	if (!funcs)
2343 		return 0;
2344 
2345 	return max / funcs;
2346 }
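
/* For example, with a valid_functions bitmap of 0x0F (four active PFs) and
 * max = 768, each PF is granted 768 / 4 = 192 resources.
 */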
2347 
2348 /**
2349  * ice_parse_common_caps - parse common device/function capabilities
2350  * @hw: pointer to the HW struct
2351  * @caps: pointer to common capabilities structure
2352  * @elem: the capability element to parse
2353  * @prefix: message prefix for tracing capabilities
2354  *
2355  * Given a capability element, extract relevant details into the common
2356  * capability structure.
2357  *
2358  * Returns: true if the capability matches one of the common capability ids,
2359  * false otherwise.
2360  */
2361 static bool
2362 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2363 		      struct libie_aqc_list_caps_elem *elem, const char *prefix)
2364 {
2365 	u32 logical_id = le32_to_cpu(elem->logical_id);
2366 	u32 phys_id = le32_to_cpu(elem->phys_id);
2367 	u32 number = le32_to_cpu(elem->number);
2368 	u16 cap = le16_to_cpu(elem->cap);
2369 	bool found = true;
2370 
2371 	switch (cap) {
2372 	case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
2373 		caps->valid_functions = number;
2374 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2375 			  caps->valid_functions);
2376 		break;
2377 	case LIBIE_AQC_CAPS_SRIOV:
2378 		caps->sr_iov_1_1 = (number == 1);
2379 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2380 			  caps->sr_iov_1_1);
2381 		break;
2382 	case LIBIE_AQC_CAPS_DCB:
2383 		caps->dcb = (number == 1);
2384 		caps->active_tc_bitmap = logical_id;
2385 		caps->maxtc = phys_id;
2386 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2387 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2388 			  caps->active_tc_bitmap);
2389 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2390 		break;
2391 	case LIBIE_AQC_CAPS_RSS:
2392 		caps->rss_table_size = number;
2393 		caps->rss_table_entry_width = logical_id;
2394 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2395 			  caps->rss_table_size);
2396 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2397 			  caps->rss_table_entry_width);
2398 		break;
2399 	case LIBIE_AQC_CAPS_RXQS:
2400 		caps->num_rxq = number;
2401 		caps->rxq_first_id = phys_id;
2402 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2403 			  caps->num_rxq);
2404 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2405 			  caps->rxq_first_id);
2406 		break;
2407 	case LIBIE_AQC_CAPS_TXQS:
2408 		caps->num_txq = number;
2409 		caps->txq_first_id = phys_id;
2410 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2411 			  caps->num_txq);
2412 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2413 			  caps->txq_first_id);
2414 		break;
2415 	case LIBIE_AQC_CAPS_MSIX:
2416 		caps->num_msix_vectors = number;
2417 		caps->msix_vector_first_id = phys_id;
2418 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2419 			  caps->num_msix_vectors);
2420 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2421 			  caps->msix_vector_first_id);
2422 		break;
2423 	case LIBIE_AQC_CAPS_PENDING_NVM_VER:
2424 		caps->nvm_update_pending_nvm = true;
2425 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2426 		break;
2427 	case LIBIE_AQC_CAPS_PENDING_OROM_VER:
2428 		caps->nvm_update_pending_orom = true;
2429 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2430 		break;
2431 	case LIBIE_AQC_CAPS_PENDING_NET_VER:
2432 		caps->nvm_update_pending_netlist = true;
2433 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2434 		break;
2435 	case LIBIE_AQC_CAPS_NVM_MGMT:
2436 		caps->nvm_unified_update =
2437 			!!(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT);
2439 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2440 			  caps->nvm_unified_update);
2441 		break;
2442 	case LIBIE_AQC_CAPS_RDMA:
2443 		if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
2444 			caps->rdma = (number == 1);
2445 		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2446 		break;
2447 	case LIBIE_AQC_CAPS_MAX_MTU:
2448 		caps->max_mtu = number;
2449 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2450 			  prefix, caps->max_mtu);
2451 		break;
2452 	case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2453 		caps->pcie_reset_avoidance = (number > 0);
2454 		ice_debug(hw, ICE_DBG_INIT,
2455 			  "%s: pcie_reset_avoidance = %d\n", prefix,
2456 			  caps->pcie_reset_avoidance);
2457 		break;
2458 	case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2459 		caps->reset_restrict_support = (number == 1);
2460 		ice_debug(hw, ICE_DBG_INIT,
2461 			  "%s: reset_restrict_support = %d\n", prefix,
2462 			  caps->reset_restrict_support);
2463 		break;
2464 	case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
2465 		caps->roce_lag = number & LIBIE_AQC_BIT_ROCEV2_LAG;
2466 		ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2467 			  prefix, caps->roce_lag);
2468 		caps->sriov_lag = number & LIBIE_AQC_BIT_SRIOV_LAG;
2469 		ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
2470 			  prefix, caps->sriov_lag);
2471 		caps->sriov_aa_lag = number & LIBIE_AQC_BIT_SRIOV_AA_LAG;
2472 		ice_debug(hw, ICE_DBG_INIT, "%s: sriov_aa_lag = %u\n",
2473 			  prefix, caps->sriov_aa_lag);
2474 		break;
2475 	case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2476 		caps->tx_sched_topo_comp_mode_en = (number == 1);
2477 		break;
2478 	default:
2479 		/* Not one of the recognized common capabilities */
2480 		found = false;
2481 	}
2482 
2483 	return found;
2484 }
2485 
2486 /**
2487  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2488  * @hw: pointer to the HW structure
2489  * @caps: pointer to capabilities structure to fix
2490  *
2491  * Re-calculate the capabilities that are dependent on the number of physical
2492  * ports; i.e. some features are not supported or function differently on
2493  * devices with more than 4 ports.
2494  */
2495 static void
2496 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2497 {
2498 	/* This assumes device capabilities are always scanned before function
2499 	 * capabilities during the initialization flow.
2500 	 */
2501 	if (hw->dev_caps.num_funcs > 4) {
2502 		/* Max 4 TCs per port */
2503 		caps->maxtc = 4;
2504 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2505 			  caps->maxtc);
2506 		if (caps->rdma) {
2507 			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2508 			caps->rdma = 0;
2509 		}
2510 
2511 		/* print message only when processing device capabilities
2512 		 * during initialization.
2513 		 */
2514 		if (caps == &hw->dev_caps.common_cap)
2515 			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2516 	}
2517 }
2518 
2519 /**
2520  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2521  * @hw: pointer to the HW struct
2522  * @func_p: pointer to function capabilities structure
2523  * @cap: pointer to the capability element to parse
2524  *
2525  * Extract function capabilities for ICE_AQC_CAPS_VF.
2526  */
2527 static void
2528 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2529 		       struct libie_aqc_list_caps_elem *cap)
2530 {
2531 	u32 logical_id = le32_to_cpu(cap->logical_id);
2532 	u32 number = le32_to_cpu(cap->number);
2533 
2534 	func_p->num_allocd_vfs = number;
2535 	func_p->vf_base_id = logical_id;
2536 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2537 		  func_p->num_allocd_vfs);
2538 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2539 		  func_p->vf_base_id);
2540 }
2541 
2542 /**
2543  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2544  * @hw: pointer to the HW struct
2545  * @func_p: pointer to function capabilities structure
2546  * @cap: pointer to the capability element to parse
2547  *
2548  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2549  */
2550 static void
2551 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2552 			struct libie_aqc_list_caps_elem *cap)
2553 {
2554 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2555 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2556 		  le32_to_cpu(cap->number));
2557 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2558 		  func_p->guar_num_vsi);
2559 }
2560 
2561 /**
2562  * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2563  * @hw: pointer to the HW struct
2564  * @func_p: pointer to function capabilities structure
2565  * @cap: pointer to the capability element to parse
2566  *
2567  * Extract function capabilities for ICE_AQC_CAPS_1588.
2568  */
2569 static void
2570 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2571 			 struct libie_aqc_list_caps_elem *cap)
2572 {
2573 	struct ice_ts_func_info *info = &func_p->ts_func_info;
2574 	u32 number = le32_to_cpu(cap->number);
2575 
2576 	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2577 	func_p->common_cap.ieee_1588 = info->ena;
2578 
2579 	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2580 	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2581 	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2582 	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2583 
2584 	if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
2585 		info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
2586 		info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2587 	} else {
2588 		info->clk_freq = ICE_TSPLL_FREQ_156_250;
2589 		info->clk_src = ICE_CLK_SRC_TIME_REF;
2590 	}
2591 
2592 	if (info->clk_freq < NUM_ICE_TSPLL_FREQ) {
2593 		info->time_ref = (enum ice_tspll_freq)info->clk_freq;
2594 	} else {
2595 		/* Unknown clock frequency, so assume a (probably incorrect)
2596 		 * default to avoid out-of-bounds lookups of frequency-related
2597 		 * information.
2598 		 */
2599 		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2600 			  info->clk_freq);
2601 		info->time_ref = ICE_TSPLL_FREQ_25_000;
2602 	}
2603 
2604 	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2605 		  func_p->common_cap.ieee_1588);
2606 	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2607 		  info->src_tmr_owned);
2608 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2609 		  info->tmr_ena);
2610 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2611 		  info->tmr_index_owned);
2612 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2613 		  info->tmr_index_assoc);
2614 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2615 		  info->clk_freq);
2616 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2617 		  info->clk_src);
2618 }
2619 
2620 /**
2621  * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2622  * @hw: pointer to the HW struct
2623  * @func_p: pointer to function capabilities structure
2624  *
2625  * Extract function capabilities for ICE_AQC_CAPS_FD.
2626  */
2627 static void
2628 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2629 {
2630 	u32 reg_val, gsize, bsize;
2631 
2632 	reg_val = rd32(hw, GLQF_FD_SIZE);
2633 	switch (hw->mac_type) {
2634 	case ICE_MAC_E830:
2635 		gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2636 		bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2637 		break;
2638 	case ICE_MAC_E810:
2639 	default:
2640 		gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2641 		bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2642 	}
2643 	func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
2644 	func_p->fd_fltr_best_effort = bsize;
2645 
2646 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2647 		  func_p->fd_fltr_guar);
2648 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2649 		  func_p->fd_fltr_best_effort);
2650 }
2651 
2652 /**
2653  * ice_parse_func_caps - Parse function capabilities
2654  * @hw: pointer to the HW struct
2655  * @func_p: pointer to function capabilities structure
2656  * @buf: buffer containing the function capability records
2657  * @cap_count: the number of capabilities
2658  *
2659  * Helper function to parse function (0x000A) capabilities list. For
2660  * capabilities shared between device and function, this relies on
2661  * ice_parse_common_caps.
2662  *
2663  * Loop through the list of provided capabilities and extract the relevant
2664  * data into the function capabilities structure.
2665  */
2666 static void
2667 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2668 		    void *buf, u32 cap_count)
2669 {
2670 	struct libie_aqc_list_caps_elem *cap_resp;
2671 	u32 i;
2672 
2673 	cap_resp = buf;
2674 
2675 	memset(func_p, 0, sizeof(*func_p));
2676 
2677 	for (i = 0; i < cap_count; i++) {
2678 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2679 		bool found;
2680 
2681 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2682 					      &cap_resp[i], "func caps");
2683 
2684 		switch (cap) {
2685 		case LIBIE_AQC_CAPS_VF:
2686 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2687 			break;
2688 		case LIBIE_AQC_CAPS_VSI:
2689 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2690 			break;
2691 		case LIBIE_AQC_CAPS_1588:
2692 			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2693 			break;
2694 		case LIBIE_AQC_CAPS_FD:
2695 			ice_parse_fdir_func_caps(hw, func_p);
2696 			break;
2697 		default:
2698 			/* Don't list common capabilities as unknown */
2699 			if (!found)
2700 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2701 					  i, cap);
2702 			break;
2703 		}
2704 	}
2705 
2706 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2707 }
2708 
2709 /**
2710  * ice_func_id_to_logical_id - map from function ID to logical PF ID
2711  * @active_function_bitmap: active function bitmap
2712  * @pf_id: function number of device
2713  *
2714  * Return: logical PF ID.
2715  */
2716 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
2717 {
2718 	u8 logical_id = 0;
2719 	u8 i;
2720 
2721 	for (i = 0; i < pf_id; i++)
2722 		if (active_function_bitmap & BIT(i))
2723 			logical_id++;
2724 
2725 	return logical_id;
2726 }
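
/* For example, with active_function_bitmap = 0xB (bits 0, 1 and 3 set) and
 * pf_id = 3, two lower-numbered functions are active (bit 2 is clear), so
 * the logical ID of PF 3 is 2.
 */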
2727 
2728 /**
2729  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2730  * @hw: pointer to the HW struct
2731  * @dev_p: pointer to device capabilities structure
2732  * @cap: capability element to parse
2733  *
2734  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2735  */
2736 static void
2737 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2738 			      struct libie_aqc_list_caps_elem *cap)
2739 {
2740 	u32 number = le32_to_cpu(cap->number);
2741 
2742 	dev_p->num_funcs = hweight32(number);
2743 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2744 		  dev_p->num_funcs);
2745 
2746 	hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id);
2747 }
2748 
2749 /**
2750  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2751  * @hw: pointer to the HW struct
2752  * @dev_p: pointer to device capabilities structure
2753  * @cap: capability element to parse
2754  *
2755  * Parse ICE_AQC_CAPS_VF for device capabilities.
2756  */
2757 static void
2758 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2759 		      struct libie_aqc_list_caps_elem *cap)
2760 {
2761 	u32 number = le32_to_cpu(cap->number);
2762 
2763 	dev_p->num_vfs_exposed = number;
2764 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2765 		  dev_p->num_vfs_exposed);
2766 }
2767 
2768 /**
2769  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2770  * @hw: pointer to the HW struct
2771  * @dev_p: pointer to device capabilities structure
2772  * @cap: capability element to parse
2773  *
2774  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2775  */
2776 static void
2777 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2778 		       struct libie_aqc_list_caps_elem *cap)
2779 {
2780 	u32 number = le32_to_cpu(cap->number);
2781 
2782 	dev_p->num_vsi_allocd_to_host = number;
2783 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2784 		  dev_p->num_vsi_allocd_to_host);
2785 }
2786 
2787 /**
2788  * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2789  * @hw: pointer to the HW struct
2790  * @dev_p: pointer to device capabilities structure
2791  * @cap: capability element to parse
2792  *
2793  * Parse ICE_AQC_CAPS_1588 for device capabilities.
2794  */
2795 static void
2796 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2797 			struct libie_aqc_list_caps_elem *cap)
2798 {
2799 	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2800 	u32 logical_id = le32_to_cpu(cap->logical_id);
2801 	u32 phys_id = le32_to_cpu(cap->phys_id);
2802 	u32 number = le32_to_cpu(cap->number);
2803 
2804 	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2805 	dev_p->common_cap.ieee_1588 = info->ena;
2806 
2807 	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2808 	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2809 	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2810 
2811 	info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
2812 	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2813 	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2814 
2815 	info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
2816 	info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
2817 	info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
2818 
2819 	info->ena_ports = logical_id;
2820 	info->tmr_own_map = phys_id;
2821 
2822 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2823 		  dev_p->common_cap.ieee_1588);
2824 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2825 		  info->tmr0_owner);
2826 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2827 		  info->tmr0_owned);
2828 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2829 		  info->tmr0_ena);
2830 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2831 		  info->tmr1_owner);
2832 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2833 		  info->tmr1_owned);
2834 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2835 		  info->tmr1_ena);
2836 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
2837 		  info->ts_ll_read);
2838 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
2839 		  info->ts_ll_int_read);
2840 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
2841 		  info->ll_phy_tmr_update);
2842 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2843 		  info->ena_ports);
2844 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2845 		  info->tmr_own_map);
2846 }
2847 
2848 /**
2849  * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2850  * @hw: pointer to the HW struct
2851  * @dev_p: pointer to device capabilities structure
2852  * @cap: capability element to parse
2853  *
2854  * Parse ICE_AQC_CAPS_FD for device capabilities.
2855  */
2856 static void
2857 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2858 			struct libie_aqc_list_caps_elem *cap)
2859 {
2860 	u32 number = le32_to_cpu(cap->number);
2861 
2862 	dev_p->num_flow_director_fltr = number;
2863 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2864 		  dev_p->num_flow_director_fltr);
2865 }
2866 
2867 /**
2868  * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2869  * @hw: pointer to the HW struct
2870  * @dev_p: pointer to device capabilities structure
2871  * @cap: capability element to parse
2872  *
2873  * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
2874  * enabled sensors.
2875  */
2876 static void
2877 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2878 			     struct libie_aqc_list_caps_elem *cap)
2879 {
2880 	dev_p->supported_sensors = le32_to_cpu(cap->number);
2881 
2882 	ice_debug(hw, ICE_DBG_INIT,
2883 		  "dev caps: supported sensors (bitmap) = 0x%x\n",
2884 		  dev_p->supported_sensors);
2885 }
2886 
2887 /**
2888  * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2889  * @hw: pointer to the HW struct
2890  * @dev_p: pointer to device capabilities structure
2891  * @cap: capability element to parse
2892  *
2893  * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2894  */
2895 static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
2896 					struct ice_hw_dev_caps *dev_p,
2897 					struct libie_aqc_list_caps_elem *cap)
2898 {
2899 	dev_p->nac_topo.mode = le32_to_cpu(cap->number);
2900 	dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2901 
2902 	dev_info(ice_hw_to_dev(hw),
2903 		 "PF is configured in %s mode with IP instance ID %d\n",
2904 		 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
2905 		 "primary" : "secondary", dev_p->nac_topo.id);
2906 
2907 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2908 		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2909 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2910 		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2911 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
2912 		  dev_p->nac_topo.id);
2913 }
2914 
2915 /**
2916  * ice_parse_dev_caps - Parse device capabilities
2917  * @hw: pointer to the HW struct
2918  * @dev_p: pointer to device capabilities structure
2919  * @buf: buffer containing the device capability records
2920  * @cap_count: the number of capabilities
2921  *
2922  * Helper function to parse device (0x000B) capabilities list. For
2923  * capabilities shared between device and function, this relies on
2924  * ice_parse_common_caps.
2925  *
2926  * Loop through the list of provided capabilities and extract the relevant
2927  * data into the device capabilities structure.
2928  */
2929 static void
2930 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2931 		   void *buf, u32 cap_count)
2932 {
2933 	struct libie_aqc_list_caps_elem *cap_resp;
2934 	u32 i;
2935 
2936 	cap_resp = buf;
2937 
2938 	memset(dev_p, 0, sizeof(*dev_p));
2939 
2940 	for (i = 0; i < cap_count; i++) {
2941 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2942 		bool found;
2943 
2944 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2945 					      &cap_resp[i], "dev caps");
2946 
2947 		switch (cap) {
2948 		case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
2949 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2950 			break;
2951 		case LIBIE_AQC_CAPS_VF:
2952 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2953 			break;
2954 		case LIBIE_AQC_CAPS_VSI:
2955 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2956 			break;
2957 		case LIBIE_AQC_CAPS_1588:
2958 			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2959 			break;
2960 		case LIBIE_AQC_CAPS_FD:
2961 			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2962 			break;
2963 		case LIBIE_AQC_CAPS_SENSOR_READING:
2964 			ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
2965 			break;
2966 		case LIBIE_AQC_CAPS_NAC_TOPOLOGY:
2967 			ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
2968 			break;
2969 		default:
2970 			/* Don't list common capabilities as unknown */
2971 			if (!found)
2972 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2973 					  i, cap);
2974 			break;
2975 		}
2976 	}
2977 
2978 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2979 }
2980 
2981 /**
2982  * ice_is_phy_rclk_in_netlist - check for PHY recovered clock presence
2983  * @hw: pointer to the hw struct
2984  *
2985  * Check if the PHY Recovered Clock device is present in the netlist
2986  */
2987 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
2988 {
2989 	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
2990 				  ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
2991 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
2992 	    ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
2993 				  ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
2994 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
2995 		return false;
2996 
2997 	return true;
2998 }
2999 
3000 /**
3001  * ice_is_clock_mux_in_netlist - check for Clock Multiplexer presence
3002  * @hw: pointer to the hw struct
3003  *
3004  * Check if the Clock Multiplexer device is present in the netlist
3005  */
3006 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
3007 {
3008 	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
3009 				  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
3010 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
3011 				  NULL))
3012 		return false;
3013 
3014 	return true;
3015 }
3016 
3017 /**
3018  * ice_is_cgu_in_netlist - check for CGU presence
3019  * @hw: pointer to the hw struct
3020  *
3021  * Check if the Clock Generation Unit (CGU) device is present in the netlist.
3022  * Save the CGU part number in the hw structure for later use.
3023  * Return:
3024  * * true - cgu is present
3025  * * false - cgu is not present
3026  */
3027 bool ice_is_cgu_in_netlist(struct ice_hw *hw)
3028 {
3029 	if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
3030 				   ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
3031 				   ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
3032 				   NULL)) {
3033 		hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
3034 		return true;
3035 	} else if (!ice_find_netlist_node(hw,
3036 					  ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
3037 					  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
3038 					  ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
3039 					  NULL)) {
3040 		hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
3041 		return true;
3042 	}
3043 
3044 	return false;
3045 }
3046 
3047 /**
3048  * ice_is_gps_in_netlist - check for GPS device presence
3049  * @hw: pointer to the hw struct
3050  *
3051  * Check if the GPS generic device is present in the netlist
3052  */
3053 bool ice_is_gps_in_netlist(struct ice_hw *hw)
3054 {
3055 	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
3056 				  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
3057 				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
3058 		return false;
3059 
3060 	return true;
3061 }
3062 
3063 /**
3064  * ice_aq_list_caps - query function/device capabilities
3065  * @hw: pointer to the HW struct
3066  * @buf: a buffer to hold the capabilities
3067  * @buf_size: size of the buffer
3068  * @cap_count: if not NULL, set to the number of capabilities reported
3069  * @opc: capabilities type to discover, device or function
3070  * @cd: pointer to command details structure or NULL
3071  *
3072  * Get the function (0x000A) or device (0x000B) capabilities description from
3073  * firmware and store it in the buffer.
3074  *
3075  * If the cap_count pointer is not NULL, then it is set to the number of
3076  * capabilities firmware will report. Note that if the buffer size is too
3077  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
3078  * cap_count will still be updated in this case. It is recommended that the
3079  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
3080  * firmware could return) to avoid this.
3081  */
3082 int
3083 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
3084 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3085 {
3086 	struct libie_aqc_list_caps *cmd;
3087 	struct libie_aq_desc desc;
3088 	int status;
3089 
3090 	cmd = &desc.params.get_cap;
3091 
3092 	if (opc != ice_aqc_opc_list_func_caps &&
3093 	    opc != ice_aqc_opc_list_dev_caps)
3094 		return -EINVAL;
3095 
3096 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
3097 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3098 
3099 	if (cap_count)
3100 		*cap_count = le32_to_cpu(cmd->count);
3101 
3102 	return status;
3103 }
3104 
3105 /**
3106  * ice_discover_dev_caps - Read and extract device capabilities
3107  * @hw: pointer to the hardware structure
3108  * @dev_caps: pointer to device capabilities structure
3109  *
3110  * Read the device capabilities and extract them into the dev_caps structure
3111  * for later use.
3112  */
3113 int
3114 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
3115 {
3116 	u32 cap_count = 0;
3117 	void *cbuf;
3118 	int status;
3119 
3120 	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
3121 	if (!cbuf)
3122 		return -ENOMEM;
3123 
3124 	/* Although the driver doesn't know the number of capabilities the
3125 	 * device will return, we can simply send a 4KB buffer, the maximum
3126 	 * possible size that firmware can return.
3127 	 */
3128 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
3129 
3130 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3131 				  ice_aqc_opc_list_dev_caps, NULL);
3132 	if (!status)
3133 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
3134 	kfree(cbuf);
3135 
3136 	return status;
3137 }
3138 
3139 /**
3140  * ice_discover_func_caps - Read and extract function capabilities
3141  * @hw: pointer to the hardware structure
3142  * @func_caps: pointer to function capabilities structure
3143  *
3144  * Read the function capabilities and extract them into the func_caps structure
3145  * for later use.
3146  */
3147 static int
3148 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
3149 {
3150 	u32 cap_count = 0;
3151 	void *cbuf;
3152 	int status;
3153 
3154 	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
3155 	if (!cbuf)
3156 		return -ENOMEM;
3157 
3158 	/* Although the driver doesn't know the number of capabilities the
3159 	 * device will return, we can simply send a 4KB buffer, the maximum
3160 	 * possible size that firmware can return.
3161 	 */
3162 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
3163 
3164 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3165 				  ice_aqc_opc_list_func_caps, NULL);
3166 	if (!status)
3167 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
3168 	kfree(cbuf);
3169 
3170 	return status;
3171 }
3172 
3173 /**
3174  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
3175  * @hw: pointer to the hardware structure
3176  */
3177 void ice_set_safe_mode_caps(struct ice_hw *hw)
3178 {
3179 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
3180 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
3181 	struct ice_hw_common_caps cached_caps;
3182 	u32 num_funcs;
3183 
3184 	/* cache some func_caps values that should be restored after memset */
3185 	cached_caps = func_caps->common_cap;
3186 
3187 	/* unset func capabilities */
3188 	memset(func_caps, 0, sizeof(*func_caps));
3189 
3190 #define ICE_RESTORE_FUNC_CAP(name) \
3191 	func_caps->common_cap.name = cached_caps.name
3192 
3193 	/* restore cached values */
3194 	ICE_RESTORE_FUNC_CAP(valid_functions);
3195 	ICE_RESTORE_FUNC_CAP(txq_first_id);
3196 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
3197 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
3198 	ICE_RESTORE_FUNC_CAP(max_mtu);
3199 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
3200 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
3201 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
3202 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
3203 
3204 	/* one Tx and one Rx queue in safe mode */
3205 	func_caps->common_cap.num_rxq = 1;
3206 	func_caps->common_cap.num_txq = 1;
3207 
3208 	/* two MSIX vectors, one for traffic and one for misc causes */
3209 	func_caps->common_cap.num_msix_vectors = 2;
3210 	func_caps->guar_num_vsi = 1;
3211 
3212 	/* cache some dev_caps values that should be restored after memset */
3213 	cached_caps = dev_caps->common_cap;
3214 	num_funcs = dev_caps->num_funcs;
3215 
3216 	/* unset dev capabilities */
3217 	memset(dev_caps, 0, sizeof(*dev_caps));
3218 
3219 #define ICE_RESTORE_DEV_CAP(name) \
3220 	dev_caps->common_cap.name = cached_caps.name
3221 
3222 	/* restore cached values */
3223 	ICE_RESTORE_DEV_CAP(valid_functions);
3224 	ICE_RESTORE_DEV_CAP(txq_first_id);
3225 	ICE_RESTORE_DEV_CAP(rxq_first_id);
3226 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
3227 	ICE_RESTORE_DEV_CAP(max_mtu);
3228 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
3229 	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
3230 	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
3231 	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
3232 	dev_caps->num_funcs = num_funcs;
3233 
3234 	/* one Tx and one Rx queue per function in safe mode */
3235 	dev_caps->common_cap.num_rxq = num_funcs;
3236 	dev_caps->common_cap.num_txq = num_funcs;
3237 
3238 	/* two MSIX vectors per function */
3239 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
3240 }
3241 
3242 /**
3243  * ice_get_caps - get info about the HW
3244  * @hw: pointer to the hardware structure
3245  */
3246 int ice_get_caps(struct ice_hw *hw)
3247 {
3248 	int status;
3249 
3250 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
3251 	if (status)
3252 		return status;
3253 
3254 	return ice_discover_func_caps(hw, &hw->func_caps);
3255 }
3256 
3257 /**
3258  * ice_aq_manage_mac_write - manage MAC address write command
3259  * @hw: pointer to the HW struct
3260  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
3261  * @flags: flags to control write behavior
3262  * @cd: pointer to command details structure or NULL
3263  *
3264  * This function is used to write MAC address to the NVM (0x0108).
3265  */
3266 int
3267 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
3268 			struct ice_sq_cd *cd)
3269 {
3270 	struct ice_aqc_manage_mac_write *cmd;
3271 	struct libie_aq_desc desc;
3272 
3273 	cmd = libie_aq_raw(&desc);
3274 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
3275 
3276 	cmd->flags = flags;
3277 	ether_addr_copy(cmd->mac_addr, mac_addr);
3278 
3279 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3280 }
3281 
3282 /**
3283  * ice_aq_clear_pxe_mode - clear PXE mode
3284  * @hw: pointer to the HW struct
3285  *
3286  * Tell the firmware that the driver is taking over from PXE (0x0110).
3287  */
3288 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
3289 {
3290 	struct ice_aqc_clear_pxe *cmd;
3291 	struct libie_aq_desc desc;
3292 
3293 	cmd = libie_aq_raw(&desc);
3294 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3295 	cmd->rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3296 
3297 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3298 }
3299 
3300 /**
3301  * ice_clear_pxe_mode - clear PXE operations mode
3302  * @hw: pointer to the HW struct
3303  *
3304  * Make sure all PXE mode settings are cleared, including things
3305  * like descriptor fetch/write-back mode.
3306  */
3307 void ice_clear_pxe_mode(struct ice_hw *hw)
3308 {
3309 	if (ice_check_sq_alive(hw, &hw->adminq))
3310 		ice_aq_clear_pxe_mode(hw);
3311 }
3312 
3313 /**
3314  * ice_aq_set_port_params - set physical port parameters.
3315  * @pi: pointer to the port info struct
3316  * @double_vlan: if set double VLAN is enabled
3317  * @cd: pointer to command details structure or NULL
3318  *
3319  * Set Physical port parameters (0x0203)
3320  */
3321 int
3322 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
3323 		       struct ice_sq_cd *cd)
3325 {
3326 	struct ice_aqc_set_port_params *cmd;
3327 	struct ice_hw *hw = pi->hw;
3328 	struct libie_aq_desc desc;
3329 	u16 cmd_flags = 0;
3330 
3331 	cmd = libie_aq_raw(&desc);
3332 
3333 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3334 	if (double_vlan)
3335 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3336 	cmd->cmd_flags = cpu_to_le16(cmd_flags);
3337 
3338 	cmd->local_fwd_mode = pi->local_fwd_mode |
3339 				ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;
3340 
3341 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3342 }
3343 
3344 /**
3345  * ice_is_100m_speed_supported - check if the device supports 100M speeds
3346  * @hw: pointer to the HW struct
3347  *
3348  * Returns true if 100M speeds are supported by the device,
3349  * false otherwise.
3350  */
3351 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3352 {
3353 	switch (hw->device_id) {
3354 	case ICE_DEV_ID_E822C_SGMII:
3355 	case ICE_DEV_ID_E822L_SGMII:
3356 	case ICE_DEV_ID_E823L_1GBE:
3357 	case ICE_DEV_ID_E823C_SGMII:
3358 	case ICE_DEV_ID_E825C_SGMII:
3359 		return true;
3360 	default:
3361 		return false;
3362 	}
3363 }
3364 
3365 /**
3366  * ice_get_link_speed_based_on_phy_type - returns link speed
3367  * @phy_type_low: lower part of phy_type
3368  * @phy_type_high: higher part of phy_type
3369  *
3370  * This helper function will convert an entry in PHY type structure
3371  * [phy_type_low, phy_type_high] to its corresponding link speed.
3372  * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
3373  * should be set, as this function converts a single PHY type to its
3374  * speed.
3375  *
3376  * Return:
3377  * * PHY speed for recognized PHY type
3378  * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
3379  * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
3380  */
3381 u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3382 {
3383 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3384 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3385 
3386 	switch (phy_type_low) {
3387 	case ICE_PHY_TYPE_LOW_100BASE_TX:
3388 	case ICE_PHY_TYPE_LOW_100M_SGMII:
3389 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3390 		break;
3391 	case ICE_PHY_TYPE_LOW_1000BASE_T:
3392 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
3393 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
3394 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
3395 	case ICE_PHY_TYPE_LOW_1G_SGMII:
3396 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3397 		break;
3398 	case ICE_PHY_TYPE_LOW_2500BASE_T:
3399 	case ICE_PHY_TYPE_LOW_2500BASE_X:
3400 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
3401 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3402 		break;
3403 	case ICE_PHY_TYPE_LOW_5GBASE_T:
3404 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
3405 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3406 		break;
3407 	case ICE_PHY_TYPE_LOW_10GBASE_T:
3408 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3409 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
3410 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
3411 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3412 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3413 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3414 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3415 		break;
3416 	case ICE_PHY_TYPE_LOW_25GBASE_T:
3417 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
3418 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3419 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3420 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
3421 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
3422 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
3423 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3424 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3425 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3426 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3427 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3428 		break;
3429 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3430 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3431 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3432 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3433 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3434 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
3435 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3436 		break;
3437 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3438 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3439 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3440 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3441 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3442 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
3443 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3444 	case ICE_PHY_TYPE_LOW_50G_AUI2:
3445 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
3446 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
3447 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
3448 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
3449 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3450 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3451 	case ICE_PHY_TYPE_LOW_50G_AUI1:
3452 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3453 		break;
3454 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3455 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3456 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3457 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3458 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3459 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
3460 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3461 	case ICE_PHY_TYPE_LOW_100G_AUI4:
3462 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3463 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3464 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3465 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3466 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
3467 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3468 		break;
3469 	default:
3470 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3471 		break;
3472 	}
3473 
3474 	switch (phy_type_high) {
3475 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3476 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3477 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3478 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3479 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3480 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3481 		break;
3482 	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
3483 	case ICE_PHY_TYPE_HIGH_200G_SR4:
3484 	case ICE_PHY_TYPE_HIGH_200G_FR4:
3485 	case ICE_PHY_TYPE_HIGH_200G_LR4:
3486 	case ICE_PHY_TYPE_HIGH_200G_DR4:
3487 	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
3488 	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
3489 	case ICE_PHY_TYPE_HIGH_200G_AUI4:
3490 		speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
3491 		break;
3492 	default:
3493 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3494 		break;
3495 	}
3496 
3497 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3498 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3499 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3500 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3501 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3502 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3503 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3504 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3505 		return speed_phy_type_low;
3506 	else
3507 		return speed_phy_type_high;
3508 }
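
/* Worked example (sketch): each ICE_PHY_TYPE_LOW_* constant is a single
 * BIT_ULL() value, so passing one constant directly yields its speed:
 *
 *	u16 speed = ice_get_link_speed_based_on_phy_type(
 *				ICE_PHY_TYPE_LOW_25GBASE_CR, 0);
 *
 * Here speed is ICE_AQ_LINK_SPEED_25GB; with zero or more than one bit
 * set across the pair, ICE_AQ_LINK_SPEED_UNKNOWN is returned instead.
 */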
3509 
3510 /**
3511  * ice_update_phy_type - update PHY types based on link speeds bitmap
3512  * @phy_type_low: pointer to the lower part of phy_type
3513  * @phy_type_high: pointer to the higher part of phy_type
3514  * @link_speeds_bitmap: targeted link speeds bitmap
3515  *
3516  * Note: For the format of link_speeds_bitmap, see
3517  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
3518  * link_speeds_bitmap that includes multiple speeds.
3519  *
3520  * Each entry in the [phy_type_low, phy_type_high] structure represents
3521  * a certain link speed. This helper function turns on bits in
3522  * [phy_type_low, phy_type_high] based on the value of the
3523  * link_speeds_bitmap input parameter.
3524  */
3525 void
3526 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3527 		    u16 link_speeds_bitmap)
3528 {
3529 	u64 pt_high;
3530 	u64 pt_low;
3531 	int index;
3532 	u16 speed;
3533 
3534 	/* We first check with low part of phy_type */
3535 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3536 		pt_low = BIT_ULL(index);
3537 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3538 
3539 		if (link_speeds_bitmap & speed)
3540 			*phy_type_low |= BIT_ULL(index);
3541 	}
3542 
3543 	/* We then check with high part of phy_type */
3544 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3545 		pt_high = BIT_ULL(index);
3546 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3547 
3548 		if (link_speeds_bitmap & speed)
3549 			*phy_type_high |= BIT_ULL(index);
3550 	}
3551 }
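
/* Usage sketch: build PHY type masks that allow only 10G and 25G link
 * speeds, e.g. before masking a user-requested PHY configuration.
 *
 *	u64 phy_type_low = 0, phy_type_high = 0;
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * Afterwards phy_type_low has every 10GB and 25GB PHY type bit set.
 */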
3552 
3553 /**
3554  * ice_aq_set_phy_cfg - set PHY configuration
3555  * @hw: pointer to the HW struct
3556  * @pi: port info structure of the logical port of interest
3557  * @cfg: structure with PHY configuration data to be set
3558  * @cd: pointer to command details structure or NULL
3559  *
3560  * Set the various PHY configuration parameters supported on the Port.
3561  * One or more of the Set PHY config parameters may be ignored in an MFP
3562  * mode as the PF may not have the privilege to set some of the PHY Config
3563  * parameters. This status will be indicated by the command response (0x0601).
3564  */
3565 int
3566 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3567 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3568 {
3569 	struct ice_aqc_set_phy_cfg *cmd;
3570 	struct libie_aq_desc desc;
3571 	int status;
3572 
3573 	if (!cfg)
3574 		return -EINVAL;
3575 
3576 	/* Ensure that only valid bits of cfg->caps can be turned on. */
3577 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3578 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps: 0x%x\n",
3579 			  cfg->caps);
3580 
3581 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3582 	}
3583 
3584 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3585 	cmd = libie_aq_raw(&desc);
3586 	cmd->lport_num = pi->lport;
3587 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
3588 
3589 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3590 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
3591 		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
3592 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
3593 		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
3594 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
3595 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
3596 		  cfg->low_power_ctrl_an);
3597 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
3598 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
3599 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
3600 		  cfg->link_fec_opt);
3601 
3602 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3603 	if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
3604 		status = 0;
3605 
3606 	if (!status)
3607 		pi->phy.curr_user_phy_cfg = *cfg;
3608 
3609 	return status;
3610 }
3611 
3612 /**
3613  * ice_update_link_info - update status of the HW network link
3614  * @pi: port info structure of the logical port of interest
3615  */
3616 int ice_update_link_info(struct ice_port_info *pi)
3617 {
3618 	struct ice_link_status *li;
3619 	int status;
3620 
3621 	if (!pi)
3622 		return -EINVAL;
3623 
3624 	li = &pi->phy.link_info;
3625 
3626 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
3627 	if (status)
3628 		return status;
3629 
3630 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3631 		struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
3632 
3633 		pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3634 		if (!pcaps)
3635 			return -ENOMEM;
3636 
3637 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3638 					     pcaps, NULL);
3639 	}
3640 
3641 	return status;
3642 }
3643 
3644 /**
3645  * ice_aq_get_phy_equalization - read a serdes equalizer value from firmware
3646  * @hw: pointer to the HW struct
3647  * @data_in: serdes equalization parameter requested
3648  * @op_code: operation code selecting Tx or Rx equalization
3649  * @serdes_num: serdes number
3650  * @output: pointer to the caller-supplied buffer to return the serdes
3651  *          equalizer value
3652  *
3653  * Return: non-zero status on error and 0 on success.
3654  */
3655 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
3656 				u8 serdes_num, int *output)
3657 {
3658 	struct ice_aqc_dnl_call_command *cmd;
3659 	struct ice_aqc_dnl_call buf = {};
3660 	struct libie_aq_desc desc;
3661 	int err;
3662 
3663 	buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
3664 	buf.sto.txrx_equa_reqs.op_code_serdes_sel =
3665 		cpu_to_le16(op_code | (serdes_num & 0xF));
3666 	cmd = libie_aq_raw(&desc);
3667 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
3668 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF |
3669 				  LIBIE_AQ_FLAG_RD |
3670 				  LIBIE_AQ_FLAG_SI);
3671 	desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
3672 	cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
3673 
3674 	err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call),
3675 			      NULL);
3676 	*output = err ? 0 : buf.sto.txrx_equa_resp.val;
3677 
3678 	return err;
3679 }
3680 
3681 #define FEC_REG_PORT(port) {	\
3682 	FEC_CORR_LOW_REG_PORT##port,		\
3683 	FEC_CORR_HIGH_REG_PORT##port,	\
3684 	FEC_UNCORR_LOW_REG_PORT##port,	\
3685 	FEC_UNCORR_HIGH_REG_PORT##port,	\
3686 }
3687 
3688 static const u32 fec_reg[][ICE_FEC_MAX] = {
3689 	FEC_REG_PORT(0),
3690 	FEC_REG_PORT(1),
3691 	FEC_REG_PORT(2),
3692 	FEC_REG_PORT(3)
3693 };
3694 
3695 /**
3696  * ice_aq_get_fec_stats - read FEC stats from the PHY
3697  * @hw: pointer to the HW struct
3698  * @pcs_quad: PCS quad of the user-input serdes
3699  * @pcs_port: PCS port number within the above PCS quad
3700  * @fec_type: FEC stats type
3701  * @output: pointer to the caller-supplied buffer to return the requested FEC stats
3702  *
3703  * Return: non-zero status on error and 0 on success.
3704  */
3705 int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
3706 			 enum ice_fec_stats_types fec_type, u32 *output)
3707 {
3708 	u16 flag = (LIBIE_AQ_FLAG_RD | LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_SI);
3709 	struct ice_sbq_msg_input msg = {};
3710 	u32 receiver_id, reg_offset;
3711 	int err;
3712 
3713 	if (pcs_port > 3)
3714 		return -EINVAL;
3715 
3716 	reg_offset = fec_reg[pcs_port][fec_type];
3717 
3718 	if (pcs_quad == 0)
3719 		receiver_id = FEC_RECEIVER_ID_PCS0;
3720 	else if (pcs_quad == 1)
3721 		receiver_id = FEC_RECEIVER_ID_PCS1;
3722 	else
3723 		return -EINVAL;
3724 
3725 	msg.msg_addr_low = lower_16_bits(reg_offset);
3726 	msg.msg_addr_high = receiver_id;
3727 	msg.opcode = ice_sbq_msg_rd;
3728 	msg.dest_dev = ice_sbq_dev_phy_0;
3729 
3730 	err = ice_sbq_rw_reg(hw, &msg, flag);
3731 	if (err)
3732 		return err;
3733 
3734 	*output = msg.data;
3735 	return 0;
3736 }
3737 
3738 /**
3739  * ice_cache_phy_user_req - cache the user-requested PHY parameters
3740  * @pi: port information structure
3741  * @cache_data: PHY logging data
3742  * @cache_mode: PHY logging mode
3743  *
3744  * Cache the user request (FC, FEC, or speed) for later use.
3745  */
3746 static void
3747 ice_cache_phy_user_req(struct ice_port_info *pi,
3748 		       struct ice_phy_cache_mode_data cache_data,
3749 		       enum ice_phy_cache_mode cache_mode)
3750 {
3751 	if (!pi)
3752 		return;
3753 
3754 	switch (cache_mode) {
3755 	case ICE_FC_MODE:
3756 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3757 		break;
3758 	case ICE_SPEED_MODE:
3759 		pi->phy.curr_user_speed_req =
3760 			cache_data.data.curr_user_speed_req;
3761 		break;
3762 	case ICE_FEC_MODE:
3763 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3764 		break;
3765 	default:
3766 		break;
3767 	}
3768 }
3769 
3770 /**
3771  * ice_caps_to_fc_mode - convert PHY FC capabilities to ice FC mode
3772  * @caps: PHY capabilities
3773  *
3774  * Convert PHY FC capabilities to ice FC mode
3775  */
3776 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3777 {
3778 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3779 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3780 		return ICE_FC_FULL;
3781 
3782 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3783 		return ICE_FC_TX_PAUSE;
3784 
3785 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3786 		return ICE_FC_RX_PAUSE;
3787 
3788 	return ICE_FC_NONE;
3789 }
3790 
3791 /**
3792  * ice_caps_to_fec_mode - convert PHY FEC capabilities to ice FEC mode
3793  * @caps: PHY capabilities
3794  * @fec_options: Link FEC options
3795  *
3796  * Convert PHY FEC capabilities to ice FEC mode
3797  */
3798 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3799 {
3800 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3801 		return ICE_FEC_AUTO;
3802 
3803 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3804 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3805 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3806 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3807 		return ICE_FEC_BASER;
3808 
3809 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3810 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3811 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3812 		return ICE_FEC_RS;
3813 
3814 	return ICE_FEC_NONE;
3815 }
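
/* Usage sketch: translate reported PHY capabilities into the driver's FC
 * and FEC enums, assuming pcaps was filled by ice_aq_get_phy_caps().
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */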
3816 
3817 /**
3818  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3819  * @pi: port information structure
3820  * @cfg: PHY configuration data to set FC mode
3821  * @req_mode: FC mode to configure
3822  */
3823 int
3824 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3825 	       enum ice_fc_mode req_mode)
3826 {
3827 	struct ice_phy_cache_mode_data cache_data;
3828 	u8 pause_mask = 0x0;
3829 
3830 	if (!pi || !cfg)
3831 		return -EINVAL;
3832 
3833 	switch (req_mode) {
3834 	case ICE_FC_FULL:
3835 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3836 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3837 		break;
3838 	case ICE_FC_RX_PAUSE:
3839 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3840 		break;
3841 	case ICE_FC_TX_PAUSE:
3842 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3843 		break;
3844 	default:
3845 		break;
3846 	}
3847 
3848 	/* clear the old pause settings */
3849 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3850 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3851 
3852 	/* set the new capabilities */
3853 	cfg->caps |= pause_mask;
3854 
3855 	/* Cache user FC request */
3856 	cache_data.data.curr_user_fc_req = req_mode;
3857 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3858 
3859 	return 0;
3860 }
3861 
3862 /**
3863  * ice_set_fc - set the requested flow control mode
3864  * @pi: port information structure
3865  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3866  * @ena_auto_link_update: enable automatic link update
3867  *
3868  * Set the requested flow control mode.
3869  */
3870 int
3871 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3872 {
3873 	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
3874 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3875 	struct ice_hw *hw;
3876 	int status;
3877 
3878 	if (!pi || !aq_failures)
3879 		return -EINVAL;
3880 
3881 	*aq_failures = 0;
3882 	hw = pi->hw;
3883 
3884 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3885 	if (!pcaps)
3886 		return -ENOMEM;
3887 
3888 	/* Get the current PHY config */
3889 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3890 				     pcaps, NULL);
3891 	if (status) {
3892 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3893 		goto out;
3894 	}
3895 
3896 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3897 
3898 	/* Configure the set PHY data */
3899 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3900 	if (status)
3901 		goto out;
3902 
3903 	/* If the capabilities have changed, then set the new config */
3904 	if (cfg.caps != pcaps->caps) {
3905 		int retry_count, retry_max = 10;
3906 
3907 		/* Auto restart link so settings take effect */
3908 		if (ena_auto_link_update)
3909 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3910 
3911 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3912 		if (status) {
3913 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3914 			goto out;
3915 		}
3916 
3917 		/* Update the link info
3918 		 * It sometimes takes a really long time for link to
3919 		 * come back from the atomic reset. Thus, we wait a
3920 		 * little bit.
3921 		 */
3922 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3923 			status = ice_update_link_info(pi);
3924 
3925 			if (!status)
3926 				break;
3927 
3928 			mdelay(100);
3929 		}
3930 
3931 		if (status)
3932 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3933 	}
3934 
3935 out:
3936 	return status;
3937 }
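
/* Usage sketch (hypothetical caller): request full flow control and let
 * the link restart automatically; aq_fail narrows down which AQ step
 * failed when a non-zero status is returned.
 *
 *	u8 aq_fail = 0;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	err = ice_set_fc(pi, &aq_fail, true);
 */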
3938 
3939 /**
3940  * ice_phy_caps_equals_cfg - check if PHY capabilities match PHY configuration
3941  * @phy_caps: PHY capabilities
3942  * @phy_cfg: PHY configuration
3943  *
3944  * Helper function to determine if the PHY capabilities match the PHY
3945  * configuration.
3946  */
3947 bool
3948 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3949 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3950 {
3951 	u8 caps_mask, cfg_mask;
3952 
3953 	if (!phy_caps || !phy_cfg)
3954 		return false;
3955 
3956 	/* These bits are not common between capabilities and configuration.
3957 	 * Do not use them to determine equality.
3958 	 */
3959 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3960 					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
3961 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3962 
3963 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3964 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3965 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3966 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3967 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3968 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3969 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3970 		return false;
3971 
3972 	return true;
3973 }
3974 
3975 /**
3976  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3977  * @pi: port information structure
3978  * @caps: PHY ability structure to copy data from
3979  * @cfg: PHY configuration structure to copy data to
3980  *
3981  * Helper function to copy AQC PHY get ability data to PHY set configuration
3982  * data structure
3983  */
3984 void
3985 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3986 			 struct ice_aqc_get_phy_caps_data *caps,
3987 			 struct ice_aqc_set_phy_cfg_data *cfg)
3988 {
3989 	if (!pi || !caps || !cfg)
3990 		return;
3991 
3992 	memset(cfg, 0, sizeof(*cfg));
3993 	cfg->phy_type_low = caps->phy_type_low;
3994 	cfg->phy_type_high = caps->phy_type_high;
3995 	cfg->caps = caps->caps;
3996 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3997 	cfg->eee_cap = caps->eee_cap;
3998 	cfg->eeer_value = caps->eeer_value;
3999 	cfg->link_fec_opt = caps->link_fec_options;
4000 	cfg->module_compliance_enforcement =
4001 		caps->module_compliance_enforcement;
4002 }
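
/* Usage sketch: the usual get/modify/set flow, where the active PHY caps
 * seed the configuration before individual fields are overridden. Assumes
 * pcaps was allocated by the caller.
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *	int err;
 *
 *	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				  pcaps, NULL);
 *	if (!err)
 *		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 */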
4003 
4004 /**
4005  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
4006  * @pi: port information structure
4007  * @cfg: PHY configuration data to set FEC mode
4008  * @fec: FEC mode to configure
4009  */
4010 int
4011 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
4012 		enum ice_fec_mode fec)
4013 {
4014 	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
4015 	struct ice_hw *hw;
4016 	int status;
4017 
4018 	if (!pi || !cfg)
4019 		return -EINVAL;
4020 
4021 	hw = pi->hw;
4022 
4023 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
4024 	if (!pcaps)
4025 		return -ENOMEM;
4026 
4027 	status = ice_aq_get_phy_caps(pi, false,
4028 				     (ice_fw_supports_report_dflt_cfg(hw) ?
4029 				      ICE_AQC_REPORT_DFLT_CFG :
4030 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
4031 	if (status)
4032 		goto out;
4033 
4034 	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
4035 	cfg->link_fec_opt = pcaps->link_fec_options;
4036 
4037 	switch (fec) {
4038 	case ICE_FEC_BASER:
4039 		/* Clear the RS bits; AND in the BASE-R ability
4040 		 * bits and OR in the request bits.
4041 		 */
4042 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
4043 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
4044 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
4045 			ICE_AQC_PHY_FEC_25G_KR_REQ;
4046 		break;
4047 	case ICE_FEC_RS:
4048 		/* Clear the BASE-R bits; AND in the RS ability
4049 		 * bits and OR in the request bits.
4050 		 */
4051 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
4052 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
4053 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
4054 		break;
4055 	case ICE_FEC_NONE:
4056 		/* Clear all FEC option bits. */
4057 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
4058 		break;
4059 	case ICE_FEC_AUTO:
4060 		/* AND in the auto FEC bit and all caps bits. */
4061 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
4062 		cfg->link_fec_opt |= pcaps->link_fec_options;
4063 		break;
4064 	default:
4065 		status = -EINVAL;
4066 		break;
4067 	}
4068 
4069 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
4070 	    !ice_fw_supports_report_dflt_cfg(hw)) {
4071 		struct ice_link_default_override_tlv tlv = { 0 };
4072 
4073 		status = ice_get_link_default_override(&tlv, pi);
4074 		if (status)
4075 			goto out;
4076 
4077 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
4078 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
4079 			cfg->link_fec_opt = tlv.fec_options;
4080 	}
4081 
4082 out:
4083 	return status;
4084 }
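
/* Usage sketch: request RS FEC on top of a caps-seeded configuration
 * (see ice_copy_phy_caps_to_cfg() above) and push the result to firmware.
 *
 *	err = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (!err)
 *		err = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */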
4085 
4086 /**
4087  * ice_get_link_status - get status of the HW network link
4088  * @pi: port information structure
4089  * @link_up: pointer to bool (true/false = linkup/linkdown)
4090  *
4091  * The variable link_up is true if the link is up, false if it is down;
4092  * its value is invalid if the returned status is non-zero. As a
4093  * result of this call, link status reporting becomes enabled.
4094  */
4095 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
4096 {
4097 	struct ice_phy_info *phy_info;
4098 	int status = 0;
4099 
4100 	if (!pi || !link_up)
4101 		return -EINVAL;
4102 
4103 	phy_info = &pi->phy;
4104 
4105 	if (phy_info->get_link_info) {
4106 		status = ice_update_link_info(pi);
4107 
4108 		if (status)
4109 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
4110 				  status);
4111 	}
4112 
4113 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
4114 
4115 	return status;
4116 }
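
/* Usage sketch (hypothetical caller): poll the link state; *link_up is
 * only trustworthy when the function returns 0.
 *
 *	bool link_up;
 *	int err;
 *
 *	err = ice_get_link_status(pi, &link_up);
 *	if (!err && link_up)
 *		netif_carrier_on(netdev);
 */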
4117 
4118 /**
4119  * ice_aq_set_link_restart_an - set up the link and restart auto-negotiation
4120  * @pi: pointer to the port information structure
4121  * @ena_link: if true: enable link, if false: disable link
4122  * @cd: pointer to command details structure or NULL
4123  *
4124  * Sets up the link and restarts the Auto-Negotiation over the link.
4125  */
4126 int
4127 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
4128 			   struct ice_sq_cd *cd)
4129 {
4130 	struct ice_aqc_restart_an *cmd;
4131 	struct libie_aq_desc desc;
4132 
4133 	cmd = libie_aq_raw(&desc);
4134 
4135 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
4136 
4137 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
4138 	cmd->lport_num = pi->lport;
4139 	if (ena_link)
4140 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
4141 	else
4142 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
4143 
4144 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
4145 }
4146 
4147 /**
4148  * ice_aq_set_event_mask - set the event mask for a port
4149  * @hw: pointer to the HW struct
4150  * @port_num: port number of the physical function
4151  * @mask: event mask to be set
4152  * @cd: pointer to command details structure or NULL
4153  *
4154  * Set event mask (0x0613)
4155  */
4156 int
4157 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
4158 		      struct ice_sq_cd *cd)
4159 {
4160 	struct ice_aqc_set_event_mask *cmd;
4161 	struct libie_aq_desc desc;
4162 
4163 	cmd = libie_aq_raw(&desc);
4164 
4165 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
4166 
4167 	cmd->lport_num = port_num;
4168 
4169 	cmd->event_mask = cpu_to_le16(mask);
4170 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4171 }
4172 
4173 /**
4174  * ice_aq_set_mac_loopback - enable/disable MAC loopback
4175  * @hw: pointer to the HW struct
4176  * @ena_lpbk: enable or disable loopback
4177  * @cd: pointer to command details structure or NULL
4178  *
4179  * Enable/disable loopback on a given port
4180  */
4181 int
4182 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
4183 {
4184 	struct ice_aqc_set_mac_lb *cmd;
4185 	struct libie_aq_desc desc;
4186 
4187 	cmd = libie_aq_raw(&desc);
4188 
4189 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
4190 	if (ena_lpbk)
4191 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
4192 
4193 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4194 }
4195 
4196 /**
4197  * ice_aq_set_port_id_led - set the port identification LED
4198  * @pi: pointer to the port information
4199  * @is_orig_mode: is this LED set to original mode (by the net-list)
4200  * @cd: pointer to command details structure or NULL
4201  *
4202  * Set LED value for the given port (0x06e9)
4203  */
4204 int
4205 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
4206 		       struct ice_sq_cd *cd)
4207 {
4208 	struct ice_aqc_set_port_id_led *cmd;
4209 	struct ice_hw *hw = pi->hw;
4210 	struct libie_aq_desc desc;
4211 
4212 	cmd = libie_aq_raw(&desc);
4213 
4214 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
4215 
4216 	if (is_orig_mode)
4217 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
4218 	else
4219 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
4220 
4221 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4222 }
4223 
4224 /**
4225  * ice_aq_get_port_options - get available port options
4226  * @hw: pointer to the HW struct
4227  * @options: buffer for the resultant port options
4228  * @option_count: input - size of the buffer in port options structures,
4229  *                output - number of returned port options
4230  * @lport: logical port to call the command with (optional)
4231  * @lport_valid: when false, FW uses port owned by the PF instead of lport,
4232  *               when PF owns more than 1 port it must be true
4233  * @active_option_idx: index of active port option in returned buffer
4234  * @active_option_valid: active option in returned buffer is valid
4235  * @pending_option_idx: index of pending port option in returned buffer
4236  * @pending_option_valid: pending option in returned buffer is valid
4237  *
4238  * Calls Get Port Options AQC (0x06ea) and verifies result.
4239  */
4240 int
4241 ice_aq_get_port_options(struct ice_hw *hw,
4242 			struct ice_aqc_get_port_options_elem *options,
4243 			u8 *option_count, u8 lport, bool lport_valid,
4244 			u8 *active_option_idx, bool *active_option_valid,
4245 			u8 *pending_option_idx, bool *pending_option_valid)
4246 {
4247 	struct ice_aqc_get_port_options *cmd;
4248 	struct libie_aq_desc desc;
4249 	int status;
4250 	u8 i;
4251 
4252 	/* options buffer shall be able to hold max returned options */
4253 	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
4254 		return -EINVAL;
4255 
4256 	cmd = libie_aq_raw(&desc);
4257 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
4258 
4259 	if (lport_valid)
4260 		cmd->lport_num = lport;
4261 	cmd->lport_num_valid = lport_valid;
4262 
4263 	status = ice_aq_send_cmd(hw, &desc, options,
4264 				 *option_count * sizeof(*options), NULL);
4265 	if (status)
4266 		return status;
4267 
4268 	/* verify direct FW response & set output parameters */
4269 	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
4270 				  cmd->port_options_count);
4271 	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
4272 	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
4273 					 cmd->port_options);
4274 	if (*active_option_valid) {
4275 		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
4276 					       cmd->port_options);
4277 		if (*active_option_idx > (*option_count - 1))
4278 			return -EIO;
4279 		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
4280 			  *active_option_idx);
4281 	}
4282 
4283 	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
4284 					  cmd->pending_port_option_status);
4285 	if (*pending_option_valid) {
4286 		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
4287 						cmd->pending_port_option_status);
4288 		if (*pending_option_idx > (*option_count - 1))
4289 			return -EIO;
4290 		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
4291 			  *pending_option_idx);
4292 	}
4293 
4294 	/* mask output options fields */
4295 	for (i = 0; i < *option_count; i++) {
4296 		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
4297 					   options[i].pmd);
4298 		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
4299 						      options[i].max_lane_speed);
4300 		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
4301 			  options[i].pmd, options[i].max_lane_speed);
4302 	}
4303 
4304 	return 0;
4305 }
4306 
4307 /**
4308  * ice_aq_set_port_option - set a new port option
4309  * @hw: pointer to the HW struct
4310  * @lport: logical port to call the command with
4311  * @lport_valid: when false, FW uses port owned by the PF instead of lport,
4312  *               when PF owns more than 1 port it must be true
4313  * @new_option: new port option to be written
4314  *
4315  * Calls Set Port Options AQC (0x06eb).
4316  */
4317 int
4318 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
4319 		       u8 new_option)
4320 {
4321 	struct ice_aqc_set_port_option *cmd;
4322 	struct libie_aq_desc desc;
4323 
4324 	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
4325 		return -EINVAL;
4326 
4327 	cmd = libie_aq_raw(&desc);
4328 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
4329 
4330 	if (lport_valid)
4331 		cmd->lport_num = lport;
4332 
4333 	cmd->lport_num_valid = lport_valid;
4334 	cmd->selected_port_option = new_option;
4335 
4336 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4337 }
4338 
4339 /**
4340  * ice_get_phy_lane_number - Get PHY lane number for current adapter
4341  * @hw: pointer to the hw struct
4342  *
4343  * Return: PHY lane number on success, negative error code otherwise.
4344  */
4345 int ice_get_phy_lane_number(struct ice_hw *hw)
4346 {
4347 	struct ice_aqc_get_port_options_elem *options;
4348 	unsigned int lport = 0;
4349 	unsigned int lane;
4350 	int err;
4351 
4352 	/* E82X devices do not have sequential IDs; the lane number is the
4353 	 * PF ID. For E825 devices, the exception is the variant with an
4354 	 * external PHY (0x579F), which also has a 1:1 pf_id -> lane_number
4355 	 * mapping.
4356 	 */
4357 	if (hw->mac_type == ICE_MAC_GENERIC ||
4358 	    hw->device_id == ICE_DEV_ID_E825C_SGMII)
4359 		return hw->pf_id;
4360 
4361 	options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
4362 	if (!options)
4363 		return -ENOMEM;
4364 
4365 	for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
4366 		u8 options_count = ICE_AQC_PORT_OPT_MAX;
4367 		u8 speed, active_idx, pending_idx;
4368 		bool active_valid, pending_valid;
4369 
4370 		err = ice_aq_get_port_options(hw, options, &options_count, lane,
4371 					      true, &active_idx, &active_valid,
4372 					      &pending_idx, &pending_valid);
4373 		if (err)
4374 			goto err;
4375 
4376 		if (!active_valid)
4377 			continue;
4378 
4379 		speed = options[active_idx].max_lane_speed;
4380 		/* If we don't get speed for this lane, it's unoccupied */
4381 		if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
4382 			continue;
4383 
4384 		if (hw->pf_id == lport) {
4385 			if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
4386 			    ice_is_dual(hw) && !ice_is_primary(hw))
4387 				lane += ICE_PORTS_PER_QUAD;
4388 			kfree(options);
4389 			return lane;
4390 		}
4391 		lport++;
4392 	}
4393 
4394 	/* PHY lane not found */
4395 	err = -ENXIO;
4396 err:
4397 	kfree(options);
4398 	return err;
4399 }
4400 
4401 /**
4402  * ice_aq_sff_eeprom - read/write SFF EEPROM
4403  * @hw: pointer to the HW struct
4404  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
4405  * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0 = topo default)
4406  * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be zero.
4407  * @page: QSFP page
4408  * @set_page: set or ignore the page
4409  * @data: pointer to the data buffer to be read from/written to the I2C device
4410  * @length: 1-16 for read, 1 for write
4411  * @write: false for read, true for write
4412  * @cd: pointer to command details structure or NULL
4413  *
4414  * Read/Write SFF EEPROM (0x06EE)
4415  */
4416 int
4417 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
4418 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
4419 		  bool write, struct ice_sq_cd *cd)
4420 {
4421 	struct ice_aqc_sff_eeprom *cmd;
4422 	struct libie_aq_desc desc;
4423 	u16 i2c_bus_addr;
4424 	int status;
4425 
4426 	if (!data || (mem_addr & 0xff00))
4427 		return -EINVAL;
4428 
4429 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
4430 	cmd = libie_aq_raw(&desc);
4431 	desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_RD);
4432 	cmd->lport_num = (u8)(lport & 0xff);
4433 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
4434 	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
4435 		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
4436 	if (write)
4437 		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
4438 	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
4439 	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
4440 	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
4441 
4442 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
4443 	return status;
4444 }
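
/* Usage sketch: read the SFF-8024 identifier byte (offset 0, page 0) of
 * a module behind the default I2C address 0xA0, without setting a page.
 * lport 0 with bit 8 clear lets FW pick the port owned by this PF.
 *
 *	u8 id;
 *	int err;
 *
 *	err = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, &id, 1,
 *				false, NULL);
 */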
4445 
4446 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
4447 {
4448 	switch (type) {
4449 	case ICE_LUT_VSI:
4450 		return ICE_LUT_VSI_SIZE;
4451 	case ICE_LUT_GLOBAL:
4452 		return ICE_LUT_GLOBAL_SIZE;
4453 	case ICE_LUT_PF:
4454 		return ICE_LUT_PF_SIZE;
4455 	}
4456 	WARN_ONCE(1, "incorrect type passed");
4457 	return ICE_LUT_VSI_SIZE;
4458 }
4459 
4460 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
4461 {
4462 	switch (size) {
4463 	case ICE_LUT_VSI_SIZE:
4464 		return ICE_AQC_LUT_SIZE_SMALL;
4465 	case ICE_LUT_GLOBAL_SIZE:
4466 		return ICE_AQC_LUT_SIZE_512;
4467 	case ICE_LUT_PF_SIZE:
4468 		return ICE_AQC_LUT_SIZE_2K;
4469 	}
4470 	WARN_ONCE(1, "incorrect size passed");
4471 	return 0;
4472 }
4473 
4474 /**
4475  * __ice_aq_get_set_rss_lut - get or set the RSS lookup table
4476  * @hw: pointer to the hardware structure
4477  * @params: RSS LUT parameters
4478  * @set: set true to set the table, false to get the table
4479  *
4480  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4481  */
4482 static int
4483 __ice_aq_get_set_rss_lut(struct ice_hw *hw,
4484 			 struct ice_aq_get_set_rss_lut_params *params, bool set)
4485 {
4486 	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
4487 	enum ice_lut_type lut_type = params->lut_type;
4488 	struct ice_aqc_get_set_rss_lut *desc_params;
4489 	enum ice_aqc_lut_flags flags;
4490 	enum ice_lut_size lut_size;
4491 	struct libie_aq_desc desc;
4492 	u8 *lut = params->lut;
4493 
4495 	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
4496 		return -EINVAL;
4497 
4498 	lut_size = ice_lut_type_to_size(lut_type);
4499 	if (lut_size > params->lut_size)
4500 		return -EINVAL;
4501 	else if (set && lut_size != params->lut_size)
4502 		return -EINVAL;
4503 
4504 	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
4505 	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
4506 	if (set)
4507 		desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4508 
4509 	desc_params = libie_aq_raw(&desc);
4510 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4511 	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
4512 
4513 	if (lut_type == ICE_LUT_GLOBAL)
4514 		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
4515 					  params->global_lut_id);
4516 
4517 	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
4518 	desc_params->flags = cpu_to_le16(flags);
4519 
4520 	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4521 }
4522 
4523 /**
4524  * ice_aq_get_rss_lut - get the RSS lookup table
4525  * @hw: pointer to the hardware structure
4526  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4527  *
4528  * get the RSS lookup table, PF or VSI type
4529  */
4530 int
4531 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4532 {
4533 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
4534 }
4535 
4536 /**
4537  * ice_aq_set_rss_lut - set the RSS lookup table
4538  * @hw: pointer to the hardware structure
4539  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4540  *
4541  * set the RSS lookup table, PF or VSI type
4542  */
4543 int
4544 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4545 {
4546 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
4547 }
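
/* Usage sketch: program a VSI-sized LUT; for a set operation the buffer
 * size must match the LUT size exactly (see __ice_aq_get_set_rss_lut()).
 * Assumes lut and vsi_handle come from the caller.
 *
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut_size = ICE_LUT_VSI_SIZE,
 *		.lut_type = ICE_LUT_VSI,
 *		.lut = lut,
 *	};
 *	int err = ice_aq_set_rss_lut(hw, &params);
 */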
4548 
4549 /**
4550  * __ice_aq_get_set_rss_key - get or set the RSS key for a VSI
4551  * @hw: pointer to the HW struct
4552  * @vsi_id: VSI FW index
4553  * @key: pointer to key info struct
4554  * @set: set true to set the key, false to get the key
4555  *
4556  * get (0x0B04) or set (0x0B02) the RSS key per VSI
4557  */
4558 static int
4559 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4560 			 struct ice_aqc_get_set_rss_keys *key, bool set)
4561 {
4562 	struct ice_aqc_get_set_rss_key *desc_params;
4563 	u16 key_size = sizeof(*key);
4564 	struct libie_aq_desc desc;
4565 
4566 	if (set) {
4567 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4568 		desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4569 	} else {
4570 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4571 	}
4572 
4573 	desc_params = libie_aq_raw(&desc);
4574 	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
4575 
4576 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4577 }
4578 
4579 /**
4580  * ice_aq_get_rss_key - get the RSS key for a VSI
4581  * @hw: pointer to the HW struct
4582  * @vsi_handle: software VSI handle
4583  * @key: pointer to key info struct
4584  *
4585  * get the RSS key per VSI
4586  */
4587 int
4588 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4589 		   struct ice_aqc_get_set_rss_keys *key)
4590 {
4591 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4592 		return -EINVAL;
4593 
4594 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4595 					key, false);
4596 }
4597 
4598 /**
4599  * ice_aq_set_rss_key - set the RSS key for a VSI
4600  * @hw: pointer to the HW struct
4601  * @vsi_handle: software VSI handle
4602  * @keys: pointer to key info struct
4603  *
4604  * set the RSS key per VSI
4605  */
4606 int
4607 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4608 		   struct ice_aqc_get_set_rss_keys *keys)
4609 {
4610 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4611 		return -EINVAL;
4612 
4613 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4614 					keys, true);
4615 }
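
/* Usage sketch (hypothetical caller): seed the VSI RSS key with random
 * bytes via netdev_rss_key_fill(), then program it.
 *
 *	struct ice_aqc_get_set_rss_keys keys = {};
 *	int err;
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	err = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */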
4616 
4617 /**
4618  * ice_aq_add_lan_txq - add a Tx LAN queue
4619  * @hw: pointer to the hardware structure
4620  * @num_qgrps: Number of added queue groups
4621  * @qg_list: list of queue groups to be added
4622  * @buf_size: size of buffer for indirect command
4623  * @cd: pointer to command details structure or NULL
4624  *
4625  * Add Tx LAN queue (0x0C30)
4626  *
4627  * NOTE:
4628  * Prior to calling add Tx LAN queue, initialize the following as part
4629  * of the Tx queue context: the completion queue ID (if the queue uses
4630  * a completion queue), the quanta profile, the cache profile, and the
4631  * packet shaper profile.
4632  *
4633  * After the add Tx LAN queue AQ command completes, interrupts should
4634  * be associated with specific queues. Association of a Tx queue to a
4635  * doorbell queue is not part of the add LAN Tx queue flow.
4637  */
4638 static int
4639 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4640 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4641 		   struct ice_sq_cd *cd)
4642 {
4643 	struct ice_aqc_add_tx_qgrp *list;
4644 	struct ice_aqc_add_txqs *cmd;
4645 	struct libie_aq_desc desc;
4646 	u16 i, sum_size = 0;
4647 
4648 	cmd = libie_aq_raw(&desc);
4649 
4650 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4651 
4652 	if (!qg_list)
4653 		return -EINVAL;
4654 
4655 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4656 		return -EINVAL;
4657 
4658 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
4659 		sum_size += struct_size(list, txqs, list->num_txqs);
4660 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4661 						      list->num_txqs);
4662 	}
4663 
4664 	if (buf_size != sum_size)
4665 		return -EINVAL;
4666 
4667 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4668 
4669 	cmd->num_qgrps = num_qgrps;
4670 
4671 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4672 }
4673 
4674 /**
4675  * ice_aq_dis_lan_txq - disable LAN Tx queues
4676  * @hw: pointer to the hardware structure
4677  * @num_qgrps: number of groups in the list
4678  * @qg_list: the list of groups to disable
4679  * @buf_size: the total size of the qg_list buffer in bytes
4680  * @rst_src: if called due to reset, specifies the reset source
4681  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4682  * @cd: pointer to command details structure or NULL
4683  *
4684  * Disable LAN Tx queue (0x0C31)
4685  */
4686 static int
4687 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4688 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4689 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
4690 		   struct ice_sq_cd *cd)
4691 {
4692 	struct ice_aqc_dis_txq_item *item;
4693 	struct ice_aqc_dis_txqs *cmd;
4694 	struct libie_aq_desc desc;
4695 	u16 vmvf_and_timeout;
4696 	u16 i, sz = 0;
4697 	int status;
4698 
4699 	cmd = libie_aq_raw(&desc);
4700 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4701 
4702 	/* qg_list can be NULL only in VM/VF reset flow */
4703 	if (!qg_list && !rst_src)
4704 		return -EINVAL;
4705 
4706 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4707 		return -EINVAL;
4708 
4709 	cmd->num_entries = num_qgrps;
4710 
4711 	vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);
4712 
4713 	switch (rst_src) {
4714 	case ICE_VM_RESET:
4715 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4716 		vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
4717 		break;
4718 	case ICE_VF_RESET:
4719 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4720 		/* In this case, FW expects vmvf_num to be absolute VF ID */
4721 		vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
4722 				    ICE_AQC_Q_DIS_VMVF_NUM_M;
4723 		break;
4724 	case ICE_NO_RESET:
4725 	default:
4726 		break;
4727 	}
4728 
4729 	cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);
4730 
4731 	/* flush pipe on time out */
4732 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4733 	/* If no queue group info, we are in a reset flow. Issue the AQ */
4734 	if (!qg_list)
4735 		goto do_aq;
4736 
4737 	/* set RD bit to indicate that command buffer is provided by the driver
4738 	 * and it needs to be read by the firmware
4739 	 */
4740 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4741 
4742 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
4743 		u16 item_size = struct_size(item, q_id, item->num_qs);
4744 
4745 		/* If the num of queues is even, add 2 bytes of padding */
4746 		if ((item->num_qs % 2) == 0)
4747 			item_size += 2;
4748 
4749 		sz += item_size;
4750 
4751 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4752 	}
4753 
4754 	if (buf_size != sz)
4755 		return -EINVAL;
4756 
4757 do_aq:
4758 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4759 	if (status) {
4760 		if (!qg_list)
4761 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4762 				  vmvf_num, hw->adminq.sq_last_status);
4763 		else
4764 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4765 				  le16_to_cpu(qg_list[0].q_id[0]),
4766 				  hw->adminq.sq_last_status);
4767 	}
4768 	return status;
4769 }
4770 
4771 /**
4772  * ice_aq_cfg_lan_txq - send AQ command 0x0C32 to FW
4773  * @hw: pointer to the hardware structure
4774  * @buf: buffer for command
4775  * @buf_size: size of buffer in bytes
4776  * @num_qs: number of queues being configured
4777  * @oldport: origination lport
4778  * @newport: destination lport
4779  * @mode: cmd_type for move to use
4780  * @cd: pointer to command details structure or NULL
4781  *
4782  * Move/Configure LAN Tx queue (0x0C32)
4783  *
4784  * Return: Zero on success, associated error code on failure.
4785  */
4786 int
4787 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
4788 		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
4789 		   u8 mode, struct ice_sq_cd *cd)
4790 {
4791 	struct ice_aqc_cfg_txqs *cmd;
4792 	struct libie_aq_desc desc;
4793 	int status;
4794 
4795 	cmd = libie_aq_raw(&desc);
4796 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
4797 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4798 
4799 	if (!buf)
4800 		return -EINVAL;
4801 
4802 	cmd->cmd_type = mode;
4803 	cmd->num_qs = num_qs;
4804 	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
4805 	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
4806 	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_MODE_M,
4807 					 ICE_AQC_Q_CFG_MODE_KEEP_OWN);
4808 	cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
4809 	cmd->blocked_cgds = 0;
4810 
4811 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4812 	if (status)
4813 		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
4814 			  hw->adminq.sq_last_status);
4815 	return status;
4816 }
4817 
4818 /**
4819  * ice_aq_add_rdma_qsets - add Tx RDMA Qsets
4820  * @hw: pointer to the hardware structure
4821  * @num_qset_grps: Number of RDMA Qset groups
4822  * @qset_list: list of Qset groups to be added
4823  * @buf_size: size of buffer for indirect command
4824  * @cd: pointer to command details structure or NULL
4825  *
4826  * Add Tx RDMA Qsets (0x0C33)
4827  */
4828 static int
4829 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4830 		      struct ice_aqc_add_rdma_qset_data *qset_list,
4831 		      u16 buf_size, struct ice_sq_cd *cd)
4832 {
4833 	struct ice_aqc_add_rdma_qset_data *list;
4834 	struct ice_aqc_add_rdma_qset *cmd;
4835 	struct libie_aq_desc desc;
4836 	u16 i, sum_size = 0;
4837 
4838 	cmd = libie_aq_raw(&desc);
4839 
4840 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4841 
4842 	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4843 		return -EINVAL;
4844 
4845 	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4846 		u16 num_qsets = le16_to_cpu(list->num_qsets);
4847 
4848 		sum_size += struct_size(list, rdma_qsets, num_qsets);
4849 		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4850 							     num_qsets);
4851 	}
4852 
4853 	if (buf_size != sum_size)
4854 		return -EINVAL;
4855 
4856 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4857 
4858 	cmd->num_qset_grps = num_qset_grps;
4859 
4860 	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4861 }
4862 
4863 /**
4864  * ice_aq_set_txtimeq - set Tx time queues
4865  * @hw: pointer to the hardware structure
4866  * @txtimeq: first Tx time queue id to configure
4867  * @q_count: number of queues to configure
4868  * @txtime_qg: queue group to be set
4869  * @buf_size: size of buffer for indirect command
4870  * @cd: pointer to command details structure or NULL
4871  *
4872  * Set Tx Time queue (0x0C35)
4873  * Return: 0 on success or negative value on failure.
4874  */
4875 int
4876 ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
4877 		   struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
4878 		   struct ice_sq_cd *cd)
4879 {
4880 	struct ice_aqc_set_txtimeqs *cmd;
4881 	struct libie_aq_desc desc;
4882 	u16 size;
4883 
4884 	if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE ||
4885 	    q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
4886 		return -EINVAL;
4887 
4888 	size = struct_size(txtime_qg, txtimeqs, q_count);
4889 	if (buf_size != size)
4890 		return -EINVAL;
4891 
4892 	cmd = libie_aq_raw(&desc);
4893 
4894 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
4895 
4896 	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
4897 
4898 	cmd->q_id = cpu_to_le16(txtimeq);
4899 	cmd->q_amount = cpu_to_le16(q_count);
4900 	return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
4901 }
4902 
4903 /* End of FW Admin Queue command wrappers */
4904 
4905 /**
4906  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4907  * @hw: pointer to the HW struct
4908  * @vsi_handle: software VSI handle
4909  * @tc: TC number
4910  * @q_handle: software queue handle
4911  */
4912 struct ice_q_ctx *
4913 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4914 {
4915 	struct ice_vsi_ctx *vsi;
4916 	struct ice_q_ctx *q_ctx;
4917 
4918 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4919 	if (!vsi)
4920 		return NULL;
4921 	if (q_handle >= vsi->num_lan_q_entries[tc])
4922 		return NULL;
4923 	if (!vsi->lan_q_ctx[tc])
4924 		return NULL;
4925 	q_ctx = vsi->lan_q_ctx[tc];
4926 	return &q_ctx[q_handle];
4927 }
4928 
4929 /**
4930  * ice_ena_vsi_txq - add and enable a LAN Tx queue
4931  * @pi: port information structure
4932  * @vsi_handle: software VSI handle
4933  * @tc: TC number
4934  * @q_handle: software queue handle
4935  * @num_qgrps: Number of added queue groups
4936  * @buf: list of queue groups to be added
4937  * @buf_size: size of buffer for indirect command
4938  * @cd: pointer to command details structure or NULL
4939  *
4940  * This function adds one LAN queue
4941  */
4942 int
4943 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4944 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4945 		struct ice_sq_cd *cd)
4946 {
4947 	struct ice_aqc_txsched_elem_data node = { 0 };
4948 	struct ice_sched_node *parent;
4949 	struct ice_q_ctx *q_ctx;
4950 	struct ice_hw *hw;
4951 	int status;
4952 
4953 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4954 		return -EIO;
4955 
4956 	if (num_qgrps > 1 || buf->num_txqs > 1)
4957 		return -ENOSPC;
4958 
4959 	hw = pi->hw;
4960 
4961 	if (!ice_is_vsi_valid(hw, vsi_handle))
4962 		return -EINVAL;
4963 
4964 	mutex_lock(&pi->sched_lock);
4965 
4966 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4967 	if (!q_ctx) {
4968 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4969 			  q_handle);
4970 		status = -EINVAL;
4971 		goto ena_txq_exit;
4972 	}
4973 
4974 	/* find a parent node */
4975 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4976 					    ICE_SCHED_NODE_OWNER_LAN);
4977 	if (!parent) {
4978 		status = -EINVAL;
4979 		goto ena_txq_exit;
4980 	}
4981 
4982 	buf->parent_teid = parent->info.node_teid;
4983 	node.parent_teid = parent->info.node_teid;
4984 	/* Mark the values in the "generic" section as valid. The default
4985 	 * value in the "generic" section is zero. This means that:
4986 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4987 	 * - 0 priority among siblings, indicated by Bit 1-3.
4988 	 * - WFQ, indicated by Bit 4.
4989 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4990 	 * Bit 5-6.
4991 	 * - Bit 7 is reserved.
4992 	 * Without setting the generic section as valid in valid_sections, the
4993 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4994 	 */
4995 	buf->txqs[0].info.valid_sections =
4996 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4997 		ICE_AQC_ELEM_VALID_EIR;
4998 	buf->txqs[0].info.generic = 0;
4999 	buf->txqs[0].info.cir_bw.bw_profile_idx =
5000 		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
5001 	buf->txqs[0].info.cir_bw.bw_alloc =
5002 		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
5003 	buf->txqs[0].info.eir_bw.bw_profile_idx =
5004 		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
5005 	buf->txqs[0].info.eir_bw.bw_alloc =
5006 		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
5007 
5008 	/* add the LAN queue */
5009 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5010 	if (status) {
5011 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5012 			  le16_to_cpu(buf->txqs[0].txq_id),
5013 			  hw->adminq.sq_last_status);
5014 		goto ena_txq_exit;
5015 	}
5016 
5017 	node.node_teid = buf->txqs[0].q_teid;
5018 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5019 	q_ctx->q_handle = q_handle;
5020 	q_ctx->q_teid = le32_to_cpu(node.node_teid);
5021 
5022 	/* add a leaf node into scheduler tree queue layer */
5023 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
5024 	if (!status)
5025 		status = ice_sched_replay_q_bw(pi, q_ctx);
5026 
5027 ena_txq_exit:
5028 	mutex_unlock(&pi->sched_lock);
5029 	return status;
5030 }
5031 
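/* Example (illustrative sketch, not part of the driver): enabling a single
 * LAN Tx queue with ice_ena_vsi_txq(). The TC, queue handle and queue group
 * buffer contents here are hypothetical; a real caller fills qg_buf->txqs[0]
 * (txq_id and the Tx queue context) from ring state before calling in.
 */
static int __maybe_unused
ice_example_ena_one_txq(struct ice_port_info *pi, u16 vsi_handle)
{
	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	u16 buf_size = __struct_size(qg_buf);

	qg_buf->num_txqs = 1;
	/* qg_buf->txqs[0].txq_id and .txq_ctx would be filled here */

	return ice_ena_vsi_txq(pi, vsi_handle, 0 /* TC */, 0 /* q_handle */,
			       1, qg_buf, buf_size, NULL);
}
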
5032 /**
5033  * ice_dis_vsi_txq - disable LAN Tx queues
5034  * @pi: port information structure
5035  * @vsi_handle: software VSI handle
5036  * @tc: TC number
5037  * @num_queues: number of queues
5038  * @q_handles: pointer to software queue handle array
5039  * @q_ids: pointer to the q_id array
5040  * @q_teids: pointer to queue node teids
5041  * @rst_src: if called due to reset, specifies the reset source
5042  * @vmvf_num: the relative VM or VF number that is undergoing the reset
5043  * @cd: pointer to command details structure or NULL
5044  *
5045  * This function removes queues and their corresponding nodes from the SW DB.
5046  */
5047 int
5048 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5049 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
5050 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
5051 		struct ice_sq_cd *cd)
5052 {
5053 	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
5054 	u16 i, buf_size = __struct_size(qg_list);
5055 	struct ice_q_ctx *q_ctx;
5056 	int status = -ENOENT;
5057 	struct ice_hw *hw;
5058 
5059 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5060 		return -EIO;
5061 
5062 	hw = pi->hw;
5063 
5064 	if (!num_queues) {
5065 		/* if the queue is already disabled but the disable queue
5066 		 * command still has to be sent to complete the VF reset,
5067 		 * call ice_aq_dis_lan_txq without any queue information
5068 		 */
5069 		if (rst_src)
5070 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5071 						  vmvf_num, NULL);
5072 		return -EIO;
5073 	}
5074 
5075 	mutex_lock(&pi->sched_lock);
5076 
5077 	for (i = 0; i < num_queues; i++) {
5078 		struct ice_sched_node *node;
5079 
5080 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5081 		if (!node)
5082 			continue;
5083 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5084 		if (!q_ctx) {
5085 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5086 				  q_handles[i]);
5087 			continue;
5088 		}
5089 		if (q_ctx->q_handle != q_handles[i]) {
5090 			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
5091 				  q_ctx->q_handle, q_handles[i]);
5092 			continue;
5093 		}
5094 		qg_list->parent_teid = node->info.parent_teid;
5095 		qg_list->num_qs = 1;
5096 		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
5097 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5098 					    vmvf_num, cd);
5099 
5100 		if (status)
5101 			break;
5102 		ice_free_sched_node(pi, node);
5103 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5104 		q_ctx->q_teid = ICE_INVAL_TEID;
5105 	}
5106 	mutex_unlock(&pi->sched_lock);
5107 	return status;
5108 }
5109 
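/* Example (illustrative sketch, not part of the driver): disabling the queue
 * enabled above. The q_id and q_teid values must be the ones recorded when
 * the queue was added (q_teid comes from q_ctx->q_teid); the arrays are
 * single-element since only one queue is torn down.
 */
static int __maybe_unused
ice_example_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle, u16 q_id,
			u32 q_teid)
{
	u16 q_handle = 0;

	return ice_dis_vsi_txq(pi, vsi_handle, 0 /* TC */, 1, &q_handle,
			       &q_id, &q_teid, ICE_NO_RESET, 0, NULL);
}
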
5110 /**
5111  * ice_cfg_vsi_qs - configure the new/existing VSI queues
5112  * @pi: port information structure
5113  * @vsi_handle: software VSI handle
5114  * @tc_bitmap: TC bitmap
5115  * @maxqs: max queues array per TC
5116  * @owner: LAN or RDMA
5117  *
5118  * This function adds/updates the VSI queues per TC.
5119  */
5120 static int
5121 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
5122 	       u16 *maxqs, u8 owner)
5123 {
5124 	int status = 0;
5125 	u8 i;
5126 
5127 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5128 		return -EIO;
5129 
5130 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5131 		return -EINVAL;
5132 
5133 	mutex_lock(&pi->sched_lock);
5134 
5135 	ice_for_each_traffic_class(i) {
5136 		/* configuration is possible only if TC node is present */
5137 		if (!ice_sched_get_tc_node(pi, i))
5138 			continue;
5139 
5140 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5141 					   ice_is_tc_ena(tc_bitmap, i));
5142 		if (status)
5143 			break;
5144 	}
5145 
5146 	mutex_unlock(&pi->sched_lock);
5147 	return status;
5148 }
5149 
5150 /**
5151  * ice_cfg_vsi_lan - configure VSI LAN queues
5152  * @pi: port information structure
5153  * @vsi_handle: software VSI handle
5154  * @tc_bitmap: TC bitmap
5155  * @max_lanqs: max LAN queues array per TC
5156  *
5157  * This function adds/updates the VSI LAN queues per TC.
5158  */
5159 int
5160 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
5161 		u16 *max_lanqs)
5162 {
5163 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5164 			      ICE_SCHED_NODE_OWNER_LAN);
5165 }
5166 
5167 /**
5168  * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5169  * @pi: port information structure
5170  * @vsi_handle: software VSI handle
5171  * @tc_bitmap: TC bitmap
5172  * @max_rdmaqs: max RDMA queues array per TC
5173  *
5174  * This function adds/updates the VSI RDMA queues per TC.
5175  */
5176 int
5177 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5178 		 u16 *max_rdmaqs)
5179 {
5180 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5181 			      ICE_SCHED_NODE_OWNER_RDMA);
5182 }
5183 
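/* Example (illustrative sketch, not part of the driver): configuring LAN
 * queues for TC 0 only. ICE_MAX_TRAFFIC_CLASS sizes the per-TC array;
 * ice_cfg_vsi_qs() passes TCs that are not set in the bitmap down as
 * disabled, so only max_lanqs[0] is consumed here.
 */
static int __maybe_unused
ice_example_cfg_tc0_lan(struct ice_port_info *pi, u16 vsi_handle, u16 num_txq)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { [0] = num_txq };

	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}
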
5184 /**
5185  * ice_ena_vsi_rdma_qset - add and enable RDMA Qsets
5186  * @pi: port information structure
5187  * @vsi_handle: software VSI handle
5188  * @tc: TC number
5189  * @rdma_qset: pointer to RDMA Qset
5190  * @num_qsets: number of RDMA Qsets
5191  * @qset_teid: pointer to Qset node TEIDs
5192  *
5193  * This function adds RDMA Qsets and their scheduler leaf nodes.
5194  */
5195 int
5196 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5197 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5198 {
5199 	struct ice_aqc_txsched_elem_data node = { 0 };
5200 	struct ice_aqc_add_rdma_qset_data *buf;
5201 	struct ice_sched_node *parent;
5202 	struct ice_hw *hw;
5203 	u16 i, buf_size;
5204 	int ret;
5205 
5206 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5207 		return -EIO;
5208 	hw = pi->hw;
5209 
5210 	if (!ice_is_vsi_valid(hw, vsi_handle))
5211 		return -EINVAL;
5212 
5213 	buf_size = struct_size(buf, rdma_qsets, num_qsets);
5214 	buf = kzalloc(buf_size, GFP_KERNEL);
5215 	if (!buf)
5216 		return -ENOMEM;
5217 	mutex_lock(&pi->sched_lock);
5218 
5219 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5220 					    ICE_SCHED_NODE_OWNER_RDMA);
5221 	if (!parent) {
5222 		ret = -EINVAL;
5223 		goto rdma_error_exit;
5224 	}
5225 	buf->parent_teid = parent->info.node_teid;
5226 	node.parent_teid = parent->info.node_teid;
5227 
5228 	buf->num_qsets = cpu_to_le16(num_qsets);
5229 	for (i = 0; i < num_qsets; i++) {
5230 		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
5231 		buf->rdma_qsets[i].info.valid_sections =
5232 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5233 			ICE_AQC_ELEM_VALID_EIR;
5234 		buf->rdma_qsets[i].info.generic = 0;
5235 		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5236 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
5237 		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5238 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
5239 		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5240 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
5241 		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5242 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
5243 	}
5244 	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5245 	if (ret) {
5246 		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5247 		goto rdma_error_exit;
5248 	}
5249 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5250 	for (i = 0; i < num_qsets; i++) {
5251 		node.node_teid = buf->rdma_qsets[i].qset_teid;
5252 		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5253 					 &node, NULL);
5254 		if (ret)
5255 			break;
5256 		qset_teid[i] = le32_to_cpu(node.node_teid);
5257 	}
5258 rdma_error_exit:
5259 	mutex_unlock(&pi->sched_lock);
5260 	kfree(buf);
5261 	return ret;
5262 }
5263 
5264 /**
5265  * ice_dis_vsi_rdma_qset - free RDMA resources
5266  * @pi: port_info struct
5267  * @count: number of RDMA Qsets to free
5268  * @qset_teid: TEIDs of the Qset nodes to free
5269  * @q_id: list of queue IDs being disabled
5270  */
5271 int
5272 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5273 		      u16 *q_id)
5274 {
5275 	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
5276 	u16 qg_size = __struct_size(qg_list);
5277 	struct ice_hw *hw;
5278 	int status = 0;
5279 	int i;
5280 
5281 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5282 		return -EIO;
5283 
5284 	hw = pi->hw;
5285 
5286 	mutex_lock(&pi->sched_lock);
5287 
5288 	for (i = 0; i < count; i++) {
5289 		struct ice_sched_node *node;
5290 
5291 		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5292 		if (!node)
5293 			continue;
5294 
5295 		qg_list->parent_teid = node->info.parent_teid;
5296 		qg_list->num_qs = 1;
5297 		qg_list->q_id[0] =
5298 			cpu_to_le16(q_id[i] |
5299 				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5300 
5301 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5302 					    ICE_NO_RESET, 0, NULL);
5303 		if (status)
5304 			break;
5305 
5306 		ice_free_sched_node(pi, node);
5307 	}
5308 
5309 	mutex_unlock(&pi->sched_lock);
5310 	return status;
5311 }
5312 
5313 /**
5314  * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements
5315  * @hw: pointer to the HW struct
5316  * @dpll_idx: index of dpll to be measured
5317  * @meas: array to be filled with results
5318  * @meas_num: maximum number of results the array can hold
5319  *
5320  * Get CGU measurements (0x0C59) of phase and frequency offsets for input
5321  * pins on given dpll.
5322  *
5323  * Return: 0 on success or negative value on failure.
5324  */
5325 int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
5326 				     struct ice_cgu_input_measure *meas,
5327 				     u16 meas_num)
5328 {
5329 	struct ice_aqc_get_cgu_input_measure *cmd;
5330 	struct libie_aq_desc desc;
5331 
5332 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure);
5333 	cmd = libie_aq_raw(&desc);
5334 	cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M;
5335 
5336 	return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL);
5337 }
5338 
5339 /**
5340  * ice_aq_get_cgu_abilities - get cgu abilities
5341  * @hw: pointer to the HW struct
5342  * @abilities: CGU abilities
5343  *
5344  * Get CGU abilities (0x0C61)
5345  * Return: 0 on success or negative value on failure.
5346  */
5347 int
5348 ice_aq_get_cgu_abilities(struct ice_hw *hw,
5349 			 struct ice_aqc_get_cgu_abilities *abilities)
5350 {
5351 	struct libie_aq_desc desc;
5352 
5353 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
5354 	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
5355 }
5356 
5357 /**
5358  * ice_aq_set_input_pin_cfg - set input pin config
5359  * @hw: pointer to the HW struct
5360  * @input_idx: Input index
5361  * @flags1: Input flags
5362  * @flags2: Input flags
5363  * @freq: Frequency in Hz
5364  * @phase_delay: Delay in ps
5365  *
5366  * Set CGU input config (0x0C62)
5367  * Return: 0 on success or negative value on failure.
5368  */
5369 int
5370 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
5371 			 u32 freq, s32 phase_delay)
5372 {
5373 	struct ice_aqc_set_cgu_input_config *cmd;
5374 	struct libie_aq_desc desc;
5375 
5376 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
5377 	cmd = libie_aq_raw(&desc);
5378 	cmd->input_idx = input_idx;
5379 	cmd->flags1 = flags1;
5380 	cmd->flags2 = flags2;
5381 	cmd->freq = cpu_to_le32(freq);
5382 	cmd->phase_delay = cpu_to_le32(phase_delay);
5383 
5384 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5385 }
5386 
5387 /**
5388  * ice_aq_get_input_pin_cfg - get input pin config
5389  * @hw: pointer to the HW struct
5390  * @input_idx: Input index
5391  * @status: Pin status
5392  * @type: Pin type
5393  * @flags1: Input flags
5394  * @flags2: Input flags
5395  * @freq: Frequency in Hz
5396  * @phase_delay: Delay in ps
5397  *
5398  * Get CGU input config (0x0C63)
5399  * Return: 0 on success or negative value on failure.
5400  */
5401 int
5402 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
5403 			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
5404 {
5405 	struct ice_aqc_get_cgu_input_config *cmd;
5406 	struct libie_aq_desc desc;
5407 	int ret;
5408 
5409 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
5410 	cmd = libie_aq_raw(&desc);
5411 	cmd->input_idx = input_idx;
5412 
5413 	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5414 	if (!ret) {
5415 		if (status)
5416 			*status = cmd->status;
5417 		if (type)
5418 			*type = cmd->type;
5419 		if (flags1)
5420 			*flags1 = cmd->flags1;
5421 		if (flags2)
5422 			*flags2 = cmd->flags2;
5423 		if (freq)
5424 			*freq = le32_to_cpu(cmd->freq);
5425 		if (phase_delay)
5426 			*phase_delay = le32_to_cpu(cmd->phase_delay);
5427 	}
5428 
5429 	return ret;
5430 }
5431 
5432 /**
5433  * ice_aq_set_output_pin_cfg - set output pin config
5434  * @hw: pointer to the HW struct
5435  * @output_idx: Output index
5436  * @flags: Output flags
5437  * @src_sel: Index of DPLL block
5438  * @freq: Output frequency
5439  * @phase_delay: Output phase compensation
5440  *
5441  * Set CGU output config (0x0C64)
5442  * Return: 0 on success or negative value on failure.
5443  */
5444 int
5445 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
5446 			  u8 src_sel, u32 freq, s32 phase_delay)
5447 {
5448 	struct ice_aqc_set_cgu_output_config *cmd;
5449 	struct libie_aq_desc desc;
5450 
5451 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
5452 	cmd = libie_aq_raw(&desc);
5453 	cmd->output_idx = output_idx;
5454 	cmd->flags = flags;
5455 	cmd->src_sel = src_sel;
5456 	cmd->freq = cpu_to_le32(freq);
5457 	cmd->phase_delay = cpu_to_le32(phase_delay);
5458 
5459 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5460 }
5461 
5462 /**
5463  * ice_aq_get_output_pin_cfg - get output pin config
5464  * @hw: pointer to the HW struct
5465  * @output_idx: Output index
5466  * @flags: Output flags
5467  * @src_sel: Internal DPLL source
5468  * @freq: Output frequency
5469  * @src_freq: Source frequency
5470  *
5471  * Get CGU output config (0x0C65)
5472  * Return: 0 on success or negative value on failure.
5473  */
5474 int
5475 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
5476 			  u8 *src_sel, u32 *freq, u32 *src_freq)
5477 {
5478 	struct ice_aqc_get_cgu_output_config *cmd;
5479 	struct libie_aq_desc desc;
5480 	int ret;
5481 
5482 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
5483 	cmd = libie_aq_raw(&desc);
5484 	cmd->output_idx = output_idx;
5485 
5486 	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5487 	if (!ret) {
5488 		if (flags)
5489 			*flags = cmd->flags;
5490 		if (src_sel)
5491 			*src_sel = cmd->src_sel;
5492 		if (freq)
5493 			*freq = le32_to_cpu(cmd->freq);
5494 		if (src_freq)
5495 			*src_freq = le32_to_cpu(cmd->src_freq);
5496 	}
5497 
5498 	return ret;
5499 }
5500 
5501 /**
5502  * ice_aq_get_cgu_dpll_status - get dpll status
5503  * @hw: pointer to the HW struct
5504  * @dpll_num: DPLL index
5505  * @ref_state: Reference clock state
5506  * @config: current DPLL config
5507  * @dpll_state: current DPLL state
5508  * @phase_offset: Phase offset in ns
5509  * @eec_mode: EEC_mode
5510  *
5511  * Get CGU DPLL status (0x0C66)
5512  * Return: 0 on success or negative value on failure.
5513  */
5514 int
5515 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
5516 			   u8 *dpll_state, u8 *config, s64 *phase_offset,
5517 			   u8 *eec_mode)
5518 {
5519 	struct ice_aqc_get_cgu_dpll_status *cmd;
5520 	struct libie_aq_desc desc;
5521 	int status;
5522 
5523 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
5524 	cmd = libie_aq_raw(&desc);
5525 	cmd->dpll_num = dpll_num;
5526 
5527 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5528 	if (!status) {
5529 		*ref_state = cmd->ref_state;
5530 		*dpll_state = cmd->dpll_state;
5531 		*config = cmd->config;
5532 		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
5533 		*phase_offset <<= 32;
5534 		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
5535 		*phase_offset = sign_extend64(*phase_offset, 47);
5536 		*eec_mode = cmd->eec_mode;
5537 	}
5538 
5539 	return status;
5540 }
5541 
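/* Worked example for the 48-bit phase offset assembly above (hypothetical
 * register values): phase_offset_h = 0x0000FFFF and phase_offset_l =
 * 0xFFFFFFF6 combine to 0x0000FFFFFFFFFFF6. Bit 47 is set, so
 * sign_extend64(value, 47) yields 0xFFFFFFFFFFFFFFF6, i.e. -10.
 */
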
5542 /**
5543  * ice_aq_set_cgu_dpll_config - set dpll config
5544  * @hw: pointer to the HW struct
5545  * @dpll_num: DPLL index
5546  * @ref_state: Reference clock state
5547  * @config: DPLL config
5548  * @eec_mode: EEC mode
5549  *
5550  * Set CGU DPLL config (0x0C67)
5551  * Return: 0 on success or negative value on failure.
5552  */
5553 int
5554 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
5555 			   u8 config, u8 eec_mode)
5556 {
5557 	struct ice_aqc_set_cgu_dpll_config *cmd;
5558 	struct libie_aq_desc desc;
5559 
5560 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
5561 	cmd = libie_aq_raw(&desc);
5562 	cmd->dpll_num = dpll_num;
5563 	cmd->ref_state = ref_state;
5564 	cmd->config = config;
5565 	cmd->eec_mode = eec_mode;
5566 
5567 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5568 }
5569 
5570 /**
5571  * ice_aq_set_cgu_ref_prio - set input reference priority
5572  * @hw: pointer to the HW struct
5573  * @dpll_num: DPLL index
5574  * @ref_idx: Reference pin index
5575  * @ref_priority: Reference input priority
5576  *
5577  * Set CGU reference priority (0x0C68)
5578  * Return: 0 on success or negative value on failure.
5579  */
5580 int
5581 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
5582 			u8 ref_priority)
5583 {
5584 	struct ice_aqc_set_cgu_ref_prio *cmd;
5585 	struct libie_aq_desc desc;
5586 
5587 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
5588 	cmd = libie_aq_raw(&desc);
5589 	cmd->dpll_num = dpll_num;
5590 	cmd->ref_idx = ref_idx;
5591 	cmd->ref_priority = ref_priority;
5592 
5593 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5594 }
5595 
5596 /**
5597  * ice_aq_get_cgu_ref_prio - get input reference priority
5598  * @hw: pointer to the HW struct
5599  * @dpll_num: DPLL index
5600  * @ref_idx: Reference pin index
5601  * @ref_prio: Reference input priority
5602  *
5603  * Get CGU reference priority (0x0C69)
5604  * Return: 0 on success or negative value on failure.
5605  */
5606 int
5607 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
5608 			u8 *ref_prio)
5609 {
5610 	struct ice_aqc_get_cgu_ref_prio *cmd;
5611 	struct libie_aq_desc desc;
5612 	int status;
5613 
5614 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
5615 	cmd = libie_aq_raw(&desc);
5616 	cmd->dpll_num = dpll_num;
5617 	cmd->ref_idx = ref_idx;
5618 
5619 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5620 	if (!status)
5621 		*ref_prio = cmd->ref_priority;
5622 
5623 	return status;
5624 }
5625 
5626 /**
5627  * ice_aq_get_cgu_info - get cgu info
5628  * @hw: pointer to the HW struct
5629  * @cgu_id: CGU ID
5630  * @cgu_cfg_ver: CGU config version
5631  * @cgu_fw_ver: CGU firmware version
5632  *
5633  * Get CGU info (0x0C6A)
5634  * Return: 0 on success or negative value on failure.
5635  */
5636 int
5637 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
5638 		    u32 *cgu_fw_ver)
5639 {
5640 	struct ice_aqc_get_cgu_info *cmd;
5641 	struct libie_aq_desc desc;
5642 	int status;
5643 
5644 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
5645 	cmd = libie_aq_raw(&desc);
5646 
5647 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5648 	if (!status) {
5649 		*cgu_id = le32_to_cpu(cmd->cgu_id);
5650 		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
5651 		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
5652 	}
5653 
5654 	return status;
5655 }
5656 
5657 /**
5658  * ice_aq_set_phy_rec_clk_out - set RCLK phy out
5659  * @hw: pointer to the HW struct
5660  * @phy_output: PHY reference clock output pin
5661  * @enable: GPIO state to be applied
5662  * @freq: PHY output frequency
5663  *
5664  * Set phy recovered clock as reference (0x0630)
5665  * Return: 0 on success or negative value on failure.
5666  */
5667 int
5668 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
5669 			   u32 *freq)
5670 {
5671 	struct ice_aqc_set_phy_rec_clk_out *cmd;
5672 	struct libie_aq_desc desc;
5673 	int status;
5674 
5675 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
5676 	cmd = libie_aq_raw(&desc);
5677 	cmd->phy_output = phy_output;
5678 	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
5679 	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
5680 	cmd->freq = cpu_to_le32(*freq);
5681 
5682 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5683 	if (!status)
5684 		*freq = le32_to_cpu(cmd->freq);
5685 
5686 	return status;
5687 }
5688 
5689 /**
5690  * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
5691  * @hw: pointer to the HW struct
5692  * @phy_output: PHY reference clock output pin
5693  * @port_num: Port number
5694  * @flags: PHY flags
5695  * @node_handle: PHY node handle in the netlist
5696  *
5697  * Get PHY recovered clock output info (0x0631)
5698  * Return: 0 on success or negative value on failure.
5699  */
5700 int
5701 ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
5702 			   u8 *flags, u16 *node_handle)
5703 {
5704 	struct ice_aqc_get_phy_rec_clk_out *cmd;
5705 	struct libie_aq_desc desc;
5706 	int status;
5707 
5708 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
5709 	cmd = libie_aq_raw(&desc);
5710 	cmd->phy_output = *phy_output;
5711 
5712 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5713 	if (!status) {
5714 		*phy_output = cmd->phy_output;
5715 		if (port_num)
5716 			*port_num = cmd->port_num;
5717 		if (flags)
5718 			*flags = cmd->flags;
5719 		if (node_handle)
5720 			*node_handle = le16_to_cpu(cmd->node_handle);
5721 	}
5722 
5723 	return status;
5724 }
5725 
5726 /**
5727  * ice_aq_get_sensor_reading - get a sensor reading
5728  * @hw: pointer to the HW struct
5729  * @data: pointer to data to be read from the sensor
5730  *
5731  * Get sensor reading (0x0632)
5732  */
5733 int ice_aq_get_sensor_reading(struct ice_hw *hw,
5734 			      struct ice_aqc_get_sensor_reading_resp *data)
5735 {
5736 	struct ice_aqc_get_sensor_reading *cmd;
5737 	struct libie_aq_desc desc;
5738 	int status;
5739 
5740 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
5741 	cmd = libie_aq_raw(&desc);
5742 #define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
5743 #define ICE_INTERNAL_TEMP_SENSOR	0
5744 	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
5745 	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
5746 
5747 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5748 	if (!status)
5749 		memcpy(data, &desc.params.raw, sizeof(*data));
5751 
5752 	return status;
5753 }
5754 
5755 /**
5756  * ice_replay_pre_init - replay pre initialization
5757  * @hw: pointer to the HW struct
5758  *
5759  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5760  */
5761 static int ice_replay_pre_init(struct ice_hw *hw)
5762 {
5763 	struct ice_switch_info *sw = hw->switch_info;
5764 	u8 i;
5765 
5766 	/* Delete old entries from replay filter list head if there is any */
5767 	ice_rm_all_sw_replay_rule_info(hw);
5768 	/* At the start of replay, move entries into the replay_rules list;
5769 	 * this allows rule entries to be added back to the filt_rules list,
5770 	 * which is the operational list.
5771 	 */
5772 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5773 		list_replace_init(&sw->recp_list[i].filt_rules,
5774 				  &sw->recp_list[i].filt_replay_rules);
5775 	ice_sched_replay_agg_vsi_preinit(hw);
5776 
5777 	return 0;
5778 }
5779 
5780 /**
5781  * ice_replay_vsi - replay VSI configuration
5782  * @hw: pointer to the HW struct
5783  * @vsi_handle: driver VSI handle
5784  *
5785  * Restore all VSI configuration after reset. It is required to call this
5786  * function with main VSI first.
5787  */
5788 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5789 {
5790 	int status;
5791 
5792 	if (!ice_is_vsi_valid(hw, vsi_handle))
5793 		return -EINVAL;
5794 
5795 	/* Replay pre-initialization if there is any */
5796 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
5797 		status = ice_replay_pre_init(hw);
5798 		if (status)
5799 			return status;
5800 	}
5801 	/* Replay per VSI all RSS configurations */
5802 	status = ice_replay_rss_cfg(hw, vsi_handle);
5803 	if (status)
5804 		return status;
5805 	/* Replay per VSI all filters */
5806 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
5807 	if (!status)
5808 		status = ice_replay_vsi_agg(hw, vsi_handle);
5809 	return status;
5810 }
5811 
5812 /**
5813  * ice_replay_post - post replay configuration cleanup
5814  * @hw: pointer to the HW struct
5815  *
5816  * Post replay cleanup.
5817  */
5818 void ice_replay_post(struct ice_hw *hw)
5819 {
5820 	/* Delete old entries from replay filter list head */
5821 	ice_rm_all_sw_replay_rule_info(hw);
5822 	ice_sched_replay_agg(hw);
5823 }
5824 
5825 /**
5826  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5827  * @hw: ptr to the hardware info
5828  * @reg: offset of 64 bit HW register to read from
5829  * @prev_stat_loaded: bool to specify if previous stats are loaded
5830  * @prev_stat: ptr to previous loaded stat value
5831  * @cur_stat: ptr to current stat value
5832  */
5833 void
5834 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5835 		  u64 *prev_stat, u64 *cur_stat)
5836 {
5837 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5838 
5839 	/* device stats are not reset at PFR; they likely will not be zeroed
5840 	 * when the driver starts. Thus, save the value from the first read
5841 	 * without adding it to the statistic value so that we report stats
5842 	 * which count up from zero.
5843 	 */
5844 	if (!prev_stat_loaded) {
5845 		*prev_stat = new_data;
5846 		return;
5847 	}
5848 
5849 	/* Calculate the difference between the new and old values, and then
5850 	 * add it to the software stat value.
5851 	 */
5852 	if (new_data >= *prev_stat)
5853 		*cur_stat += new_data - *prev_stat;
5854 	else
5855 		/* to manage the potential roll-over */
5856 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5857 
5858 	/* Update the previously stored value to prepare for next read */
5859 	*prev_stat = new_data;
5860 }
5861 
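/* Worked example for the 40-bit rollover handling above (hypothetical
 * values): with *prev_stat == 0xFFFFFFFF00 and a new register read of
 * new_data == 0xFF, new_data < *prev_stat, so the counter has wrapped.
 * The increment is (0xFF + BIT_ULL(40)) - 0xFFFFFFFF00 = 0x1FF, i.e.
 * 511 units since the previous read.
 */
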
5862 /**
5863  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5864  * @hw: ptr to the hardware info
5865  * @reg: offset of HW register to read from
5866  * @prev_stat_loaded: bool to specify if previous stats are loaded
5867  * @prev_stat: ptr to previous loaded stat value
5868  * @cur_stat: ptr to current stat value
5869  */
5870 void
5871 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5872 		  u64 *prev_stat, u64 *cur_stat)
5873 {
5874 	u32 new_data;
5875 
5876 	new_data = rd32(hw, reg);
5877 
5878 	/* device stats are not reset at PFR; they likely will not be zeroed
5879 	 * when the driver starts. Thus, save the value from the first read
5880 	 * without adding it to the statistic value so that we report stats
5881 	 * which count up from zero.
5882 	 */
5883 	if (!prev_stat_loaded) {
5884 		*prev_stat = new_data;
5885 		return;
5886 	}
5887 
5888 	/* Calculate the difference between the new and old values, and then
5889 	 * add it to the software stat value.
5890 	 */
5891 	if (new_data >= *prev_stat)
5892 		*cur_stat += new_data - *prev_stat;
5893 	else
5894 		/* to manage the potential roll-over */
5895 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5896 
5897 	/* Update the previously stored value to prepare for next read */
5898 	*prev_stat = new_data;
5899 }
5900 
5901 /**
5902  * ice_sched_query_elem - query element information from HW
5903  * @hw: pointer to the HW struct
5904  * @node_teid: node TEID to be queried
5905  * @buf: buffer to element information
5906  *
5907  * This function queries HW element information
5908  */
5909 int
5910 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5911 		     struct ice_aqc_txsched_elem_data *buf)
5912 {
5913 	u16 buf_size, num_elem_ret = 0;
5914 	int status;
5915 
5916 	buf_size = sizeof(*buf);
5917 	memset(buf, 0, buf_size);
5918 	buf->node_teid = cpu_to_le32(node_teid);
5919 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5920 					  NULL);
5921 	if (status || num_elem_ret != 1)
5922 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5923 	return status;
5924 }
5925 
5926 /**
5927  * ice_aq_read_i2c - read data from an I2C device
5928  * @hw: pointer to the hw struct
5929  * @topo_addr: topology address for a device to communicate with
5930  * @bus_addr: 7-bit I2C bus address
5931  * @addr: I2C memory address (I2C offset) with up to 16 bits
5932  * @params: I2C parameters: bit [7] - Repeated start,
5933  *			    bits [6:5] data offset size,
5934  *			    bit [4] - I2C address type,
5935  *			    bits [3:0] - data size to read (0-16 bytes)
5936  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5937  * @cd: pointer to command details structure or NULL
5938  *
5939  * Read I2C (0x06E2)
5940  */
5941 int
5942 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5943 		u16 bus_addr, __le16 addr, u8 params, u8 *data,
5944 		struct ice_sq_cd *cd)
5945 {
5946 	struct libie_aq_desc desc = { 0 };
5947 	struct ice_aqc_i2c *cmd;
5948 	u8 data_size;
5949 	int status;
5950 
5951 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5952 	cmd = libie_aq_raw(&desc);
5953 
5954 	if (!data)
5955 		return -EINVAL;
5956 
5957 	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
5958 
5959 	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
5960 	cmd->topo_addr = topo_addr;
5961 	cmd->i2c_params = params;
5962 	cmd->i2c_addr = addr;
5963 
5964 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5965 	if (!status) {
5966 		struct ice_aqc_read_i2c_resp *resp;
5967 		u8 i;
5968 
5969 		resp = libie_aq_raw(&desc);
5970 		for (i = 0; i < data_size; i++) {
5971 			*data = resp->i2c_data[i];
5972 			data++;
5973 		}
5974 	}
5975 
5976 	return status;
5977 }
5978 
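/* Example (illustrative sketch, not part of the driver): reading 4 bytes
 * from an I2C device behind the netlist. Only the data size field of
 * @params is populated here; the bus address (0x50) and zero offset are
 * hypothetical placeholders.
 */
static int __maybe_unused
ice_example_i2c_read4(struct ice_hw *hw,
		      struct ice_aqc_link_topo_addr topo_addr, u8 *buf)
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 4);

	return ice_aq_read_i2c(hw, topo_addr, 0x50, cpu_to_le16(0), params,
			       buf, NULL);
}
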
5979 /**
5980  * ice_aq_write_i2c - write data to an I2C device
5981  * @hw: pointer to the hw struct
5982  * @topo_addr: topology address for a device to communicate with
5983  * @bus_addr: 7-bit I2C bus address
5984  * @addr: I2C memory address (I2C offset) with up to 16 bits
5985  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
5986  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5987  * @cd: pointer to command details structure or NULL
5988  *
5989  * Write I2C (0x06E3)
5990  *
5991  * Return:
5992  * * 0             - Successful write to the I2C device
5993  * * -EINVAL       - Data size greater than 4 bytes
5994  * * -EIO          - FW error
5995  */
5996 int
5997 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5998 		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
5999 		 struct ice_sq_cd *cd)
6000 {
6001 	struct libie_aq_desc desc = { 0 };
6002 	struct ice_aqc_i2c *cmd;
6003 	u8 data_size;
6004 
6005 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
6006 	cmd = libie_aq_raw(&desc);
6007 
6008 	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
6009 
6010 	/* data_size limited to 4 */
6011 	if (data_size > 4)
6012 		return -EINVAL;
6013 
6014 	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
6015 	cmd->topo_addr = topo_addr;
6016 	cmd->i2c_params = params;
6017 	cmd->i2c_addr = addr;
6018 
6019 	memcpy(cmd->i2c_data, data, data_size);
6020 
6021 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6022 }
6023 
6024 /**
6025  * ice_get_pca9575_handle - find and return the PCA9575 controller
6026  * @hw: pointer to the hw struct
6027  * @pca9575_handle: GPIO controller's handle
6028  *
6029  * Find and return the GPIO controller's handle in the netlist.
6030  * When found, the value is cached in the hw structure and subsequent
6031  * calls return the cached value.
6032  *
6033  * Return: 0 on success, -ENXIO when there's no PCA9575 present.
6034  */
6035 int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
6036 {
6037 	struct ice_aqc_get_link_topo *cmd;
6038 	struct libie_aq_desc desc;
6039 	int err;
6040 	u8 idx;
6041 
6042 	/* If handle was read previously return cached value */
6043 	if (hw->io_expander_handle) {
6044 		*pca9575_handle = hw->io_expander_handle;
6045 		return 0;
6046 	}
6047 
6048 #define SW_PCA9575_SFP_TOPO_IDX		2
6049 #define SW_PCA9575_QSFP_TOPO_IDX	1
6050 
6051 	/* Check if the SW IO expander controlling SMA exists in the netlist. */
6052 	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
6053 		idx = SW_PCA9575_SFP_TOPO_IDX;
6054 	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
6055 		idx = SW_PCA9575_QSFP_TOPO_IDX;
6056 	else
6057 		return -ENXIO;
6058 
6059 	/* If handle was not detected read it from the netlist */
6060 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
6061 	cmd = libie_aq_raw(&desc);
6062 	cmd->addr.topo_params.node_type_ctx =
6063 		ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
6064 	cmd->addr.topo_params.index = idx;
6065 
6066 	err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6067 	if (err)
6068 		return -ENXIO;
6069 
6070 	/* Verify if we found the right IO expander type */
6071 	if (cmd->node_part_num != ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
6072 		return -ENXIO;
6073 
6074 	/* If present save the handle and return it */
6075 	hw->io_expander_handle =
6076 		le16_to_cpu(cmd->addr.handle);
6077 	*pca9575_handle = hw->io_expander_handle;
6078 
6079 	return 0;
6080 }
6081 
6082 /**
6083  * ice_read_pca9575_reg - read the register from the PCA9575 controller
6084  * @hw: pointer to the hw struct
6085  * @offset: GPIO controller register offset
6086  * @data: pointer to data to be read from the GPIO controller
6087  *
6088  * Return: 0 on success, negative error code otherwise.
6089  */
6090 int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
6091 {
6092 	struct ice_aqc_link_topo_addr link_topo;
6093 	__le16 addr;
6094 	u16 handle;
6095 	int err;
6096 
6097 	memset(&link_topo, 0, sizeof(link_topo));
6098 
6099 	err = ice_get_pca9575_handle(hw, &handle);
6100 	if (err)
6101 		return err;
6102 
6103 	link_topo.handle = cpu_to_le16(handle);
6104 	link_topo.topo_params.node_type_ctx =
6105 		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
6106 			   ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
6107 
6108 	addr = cpu_to_le16((u16)offset);
6109 
6110 	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
6111 }
6112 
6113 /**
6114  * ice_aq_set_gpio - set a GPIO pin state
6115  * @hw: pointer to the hw struct
6116  * @gpio_ctrl_handle: GPIO controller node handle
6117  * @pin_idx: IO number of the GPIO that needs to be set
6118  * @value: SW provided IO value to set in the LSB
6119  * @cd: pointer to command details structure or NULL
6120  *
6121  * Sends the 0x06EC AQ command to set the state of a GPIO pin that is
6122  * part of the topology.
6122  */
6123 int
6124 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6125 		struct ice_sq_cd *cd)
6126 {
6127 	struct libie_aq_desc desc;
6128 	struct ice_aqc_gpio *cmd;
6129 
6130 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
6131 	cmd = libie_aq_raw(&desc);
6132 	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
6133 	cmd->gpio_num = pin_idx;
6134 	cmd->gpio_val = value ? 1 : 0;
6135 
6136 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6137 }
6138 
6139 /**
6140  * ice_aq_get_gpio - get a GPIO pin state
6141  * @hw: pointer to the hw struct
6142  * @gpio_ctrl_handle: GPIO controller node handle
6143  * @pin_idx: IO number of the GPIO that needs to be read
6144  * @value: IO value read
6145  * @cd: pointer to command details structure or NULL
6146  *
6147  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
6148  * the topology
6149  */
6150 int
6151 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6152 		bool *value, struct ice_sq_cd *cd)
6153 {
6154 	struct libie_aq_desc desc;
6155 	struct ice_aqc_gpio *cmd;
6156 	int status;
6157 
6158 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
6159 	cmd = libie_aq_raw(&desc);
6160 	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
6161 	cmd->gpio_num = pin_idx;
6162 
6163 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6164 	if (status)
6165 		return status;
6166 
6167 	*value = !!cmd->gpio_val;
6168 	return 0;
6169 }
6170 
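/* Example (illustrative sketch, not part of the driver): driving a GPIO
 * pin high via 0x06EC and reading it back via 0x06ED. The controller
 * handle would typically come from ice_get_pca9575_handle() above.
 */
static int __maybe_unused
ice_example_gpio_set_and_check(struct ice_hw *hw, u16 gpio_ctrl_handle,
			       u8 pin_idx)
{
	bool value;
	int err;

	err = ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, true, NULL);
	if (err)
		return err;

	err = ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, &value, NULL);
	if (err)
		return err;

	return value ? 0 : -EIO;
}
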
6171 /**
6172  * ice_is_fw_api_min_ver - check if the FW API is at least a given version
6173  * @hw: pointer to the hardware structure
6174  * @maj: major version
6175  * @min: minor version
6176  * @patch: patch version
6177  *
6178  * Checks if the firmware API is at least the given version.
6179  */
6180 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6181 {
6182 	if (hw->api_maj_ver == maj) {
6183 		if (hw->api_min_ver > min)
6184 			return true;
6185 		if (hw->api_min_ver == min && hw->api_patch >= patch)
6186 			return true;
6187 	} else if (hw->api_maj_ver > maj) {
6188 		return true;
6189 	}
6190 
6191 	return false;
6192 }
6193 
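/* Worked example for the check above: against a required version of 1.5.3,
 * firmware APIs 1.5.3, 1.5.7, 1.6.0 and 2.0.0 all pass (same major with
 * minor/patch at least as new, or a newer minor/major), while 1.5.2 and
 * 1.4.9 fail.
 */
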
6194 /**
6195  * ice_fw_supports_link_override - check if FW supports link override
6196  * @hw: pointer to the hardware structure
6197  *
6198  * Checks if the firmware supports link override
6199  */
6200 bool ice_fw_supports_link_override(struct ice_hw *hw)
6201 {
6202 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6203 				     ICE_FW_API_LINK_OVERRIDE_MIN,
6204 				     ICE_FW_API_LINK_OVERRIDE_PATCH);
6205 }
6206 
6207 /**
6208  * ice_get_link_default_override - get the link default override for a port
6209  * @ldo: pointer to the link default override struct
6210  * @pi: pointer to the port info struct
6211  *
6212  * Gets the link default override for a port
6213  */
6214 int
6215 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6216 			      struct ice_port_info *pi)
6217 {
6218 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
6219 	struct ice_hw *hw = pi->hw;
6220 	int status;
6221 
6222 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6223 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6224 	if (status) {
6225 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6226 		return status;
6227 	}
6228 
6229 	/* Each port has its own config; calculate for our port */
6230 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6231 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6232 
6233 	/* link options first */
6234 	status = ice_read_sr_word(hw, tlv_start, &buf);
6235 	if (status) {
6236 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6237 		return status;
6238 	}
6239 	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
6240 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6241 		ICE_LINK_OVERRIDE_PHY_CFG_S;
6242 
6243 	/* link PHY config */
6244 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6245 	status = ice_read_sr_word(hw, offset, &buf);
6246 	if (status) {
6247 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6248 		return status;
6249 	}
6250 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6251 
6252 	/* PHY types low */
6253 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6254 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6255 		status = ice_read_sr_word(hw, (offset + i), &buf);
6256 		if (status) {
6257 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6258 			return status;
6259 		}
6260 		/* shift 16 bits at a time to fill 64 bits */
6261 		ldo->phy_type_low |= ((u64)buf << (i * 16));
6262 	}
6263 
6264 	/* PHY types high */
6265 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6266 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6267 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6268 		status = ice_read_sr_word(hw, (offset + i), &buf);
6269 		if (status) {
6270 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6271 			return status;
6272 		}
6273 		/* shift 16 bits at a time to fill 64 bits */
6274 		ldo->phy_type_high |= ((u64)buf << (i * 16));
6275 	}
6276 
6277 	return status;
6278 }
6279 
6280 /**
6281  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6282  * @caps: get PHY capability data
6283  */
6284 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6285 {
6286 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6287 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6288 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
6289 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
6290 		return true;
6291 
6292 	return false;
6293 }
6294 
6295 /**
6296  * ice_is_fw_health_report_supported - checks if firmware supports health events
6297  * @hw: pointer to the hardware structure
6298  *
6299  * Return: true if firmware supports health status reports,
6300  * false otherwise
6301  */
6302 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6303 {
6304 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
6305 				     ICE_FW_API_HEALTH_REPORT_MIN,
6306 				     ICE_FW_API_HEALTH_REPORT_PATCH);
6307 }
6308 
6309 /**
6310  * ice_aq_set_health_status_cfg - Configure FW health events
6311  * @hw: pointer to the HW struct
6312  * @event_source: type of diagnostic events to enable
6313  *
6314  * Configure the health status event types that the firmware will send to this
6315  * PF. The supported event types are: PF-specific, all PFs, and global.
6316  *
6317  * Return: 0 on success, negative error code otherwise.
6318  */
6319 int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
6320 {
6321 	struct ice_aqc_set_health_status_cfg *cmd;
6322 	struct libie_aq_desc desc;
6323 
6324 	cmd = libie_aq_raw(&desc);
6325 
6326 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
6327 
6328 	cmd->event_source = event_source;
6329 
6330 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6331 }
6332 
6333 /**
6334  * ice_aq_set_lldp_mib - Set the LLDP MIB
6335  * @hw: pointer to the HW struct
6336  * @mib_type: Local, Remote or both Local and Remote MIBs
6337  * @buf: pointer to the caller-supplied buffer to store the MIB block
6338  * @buf_size: size of the buffer (in bytes)
6339  * @cd: pointer to command details structure or NULL
6340  *
6341  * Set the LLDP MIB. (0x0A08)
6342  */
6343 int
6344 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6345 		    struct ice_sq_cd *cd)
6346 {
6347 	struct ice_aqc_lldp_set_local_mib *cmd;
6348 	struct libie_aq_desc desc;
6349 
6350 	cmd = libie_aq_raw(&desc);
6351 
6352 	if (buf_size == 0 || !buf)
6353 		return -EINVAL;
6354 
6355 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6356 
6357 	desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
6358 	desc.datalen = cpu_to_le16(buf_size);
6359 
6360 	cmd->type = mib_type;
6361 	cmd->length = cpu_to_le16(buf_size);
6362 
6363 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6364 }
6365 
6366 /**
6367  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
6368  * @hw: pointer to HW struct
6369  */
6370 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6371 {
6372 	if (hw->mac_type != ICE_MAC_E810)
6373 		return false;
6374 
6375 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6376 				     ICE_FW_API_LLDP_FLTR_MIN,
6377 				     ICE_FW_API_LLDP_FLTR_PATCH);
6378 }
6379 
6380 /**
6381  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6382  * @hw: pointer to HW struct
6383  * @vsi: VSI to add the filter to
6384  * @add: boolean for if adding or removing a filter
6385  *
6386  * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
6387  *	   with this HW or VSI, otherwise an error corresponding to
6388  *	   the AQ transaction result.
6389  */
6390 int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
6391 {
6392 	struct ice_aqc_lldp_filter_ctrl *cmd;
6393 	struct libie_aq_desc desc;
6394 
6395 	if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
6396 		return -EOPNOTSUPP;
6397 
6398 	cmd = libie_aq_raw(&desc);
6399 
6400 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6401 
6402 	if (add)
6403 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6404 	else
6405 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6406 
6407 	cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
6408 
6409 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6410 }
6411 
6412 /**
6413  * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6414  * @hw: pointer to HW struct
6415  */
6416 int ice_lldp_execute_pending_mib(struct ice_hw *hw)
6417 {
6418 	struct libie_aq_desc desc;
6419 
6420 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
6421 
6422 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6423 }
6424 
6425 /**
6426  * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
6427  * @hw: pointer to the hardware structure
6428  *
6429  * Checks if the firmware supports report default configuration
6430  */
6431 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6432 {
6433 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6434 				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
6435 				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
6436 }
6437 
6438 /* Each index into the following array matches the speed of a return
6439  * value from the list of AQ returned speeds, i.e. the range
6440  * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_200GB, excluding
6441  * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15). The link_speed returned
6442  * by the firmware is a 16 bit value and is indexed by [fls(speed) - 1];
6443  * out-of-range indexes are rejected by ice_get_link_speed() below.
6444  */
6446 static const u32 ice_aq_to_link_speed[] = {
6447 	SPEED_10,	/* BIT(0) */
6448 	SPEED_100,
6449 	SPEED_1000,
6450 	SPEED_2500,
6451 	SPEED_5000,
6452 	SPEED_10000,
6453 	SPEED_20000,
6454 	SPEED_25000,
6455 	SPEED_40000,
6456 	SPEED_50000,
6457 	SPEED_100000,	/* BIT(10) */
6458 	SPEED_200000,
6459 };
6460 
6461 /**
6462  * ice_get_link_speed - get integer speed from table
6463  * @index: array index from fls(aq speed) - 1
6464  *
6465  * Returns: u32 value containing integer speed
6466  */
6467 u32 ice_get_link_speed(u16 index)
6468 {
6469 	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
6470 		return 0;
6471 
6472 	return ice_aq_to_link_speed[index];
6473 }
6474 
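/* Worked example for the lookup above: a firmware link_speed of BIT(10)
 * gives an index of fls(BIT(10)) - 1 = 10, so ice_get_link_speed(10)
 * returns SPEED_100000. Out-of-range indexes, e.g. 15 from
 * ICE_AQ_LINK_SPEED_UNKNOWN (BIT(15)), return 0.
 */
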
6475 /**
6476  * ice_get_dest_cgu - get destination CGU dev for given HW
6477  * @hw: pointer to the HW struct
6478  *
6479  * Get CGU client id for CGU register read/write operations.
6480  *
6481  * Return: CGU device id to use in SBQ transactions.
6482  */
6483 static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
6484 {
6485 	/* On a dual complex E825, only complex 0 has a functional CGU
6486 	 * powering all the PHYs.
6487 	 * The SBQ destination device cgu points to the CGU on the current
6488 	 * complex; to access the primary CGU from the secondary complex,
6489 	 * the driver must use cgu_peer as the destination device.
6490 	 */
6491 	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
6492 	    !ice_is_primary(hw))
6493 		return ice_sbq_dev_cgu_peer;
6494 	return ice_sbq_dev_cgu;
6495 }
6496 
6497 /**
6498  * ice_read_cgu_reg - Read a CGU register
6499  * @hw: Pointer to the HW struct
6500  * @addr: Register address to read
6501  * @val: Storage for register value read
6502  *
6503  * Read the contents of a register of the Clock Generation Unit. Only
6504  * applicable to E82X devices.
6505  *
6506  * Return: 0 on success, other error codes when failed to read from CGU.
6507  */
6508 int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
6509 {
6510 	struct ice_sbq_msg_input cgu_msg = {
6511 		.dest_dev = ice_get_dest_cgu(hw),
6512 		.opcode = ice_sbq_msg_rd,
6513 		.msg_addr_low = addr
6514 	};
6515 	int err;
6516 
6517 	err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
6518 	if (err) {
6519 		ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
6520 			  addr, err);
6521 		return err;
6522 	}
6523 
6524 	*val = cgu_msg.data;
6525 
6526 	return 0;
6527 }
6528 
6529 /**
6530  * ice_write_cgu_reg - Write a CGU register
6531  * @hw: Pointer to the HW struct
6532  * @addr: Register address to write
6533  * @val: Value to write into the register
6534  *
6535  * Write the specified value to a register of the Clock Generation Unit. Only
6536  * applicable to E82X devices.
6537  *
6538  * Return: 0 on success, other error codes when failed to write to CGU.
6539  */
6540 int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
6541 {
6542 	struct ice_sbq_msg_input cgu_msg = {
6543 		.dest_dev = ice_get_dest_cgu(hw),
6544 		.opcode = ice_sbq_msg_wr,
6545 		.msg_addr_low = addr,
6546 		.data = val
6547 	};
6548 	int err;
6549 
6550 	err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
6551 	if (err)
6552 		ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
6553 			  addr, err);
6554 
6555 	return err;
6556 }
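
/* Example (illustrative sketch, not part of the driver): a read-modify-write
 * of a CGU register built from the two helpers above. The register address
 * and mask are hypothetical placeholders.
 */
static int __maybe_unused
ice_example_cgu_set_bits(struct ice_hw *hw, u32 addr, u32 mask)
{
	u32 val;
	int err;

	err = ice_read_cgu_reg(hw, addr, &val);
	if (err)
		return err;

	return ice_write_cgu_reg(hw, addr, val | mask);
}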
6557