xref: /freebsd/sys/dev/ice/ice_common.c (revision 744bfb213144c63cbaf38d91a1c4f7aebb9b9fbc)
/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * dump_phy_type - helper function that prints PHY type strings
 * @hw: pointer to the HW structure
 * @phy: 64 bit PHY type to decipher
 * @i: bit index within phy
 * @phy_string: string corresponding to bit i in phy
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
	      const char *prefix)
{
	if (phy & BIT_ULL(i))
		ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
			  phy_string);
}

/**
 * ice_dump_phy_type_low - helper function to dump phy_type_low
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
		  (unsigned long long)low);

	dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
	dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
	dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
	dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
	dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
	dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
	dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
	dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
	dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
	dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
	dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
	dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
	dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
	dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
	dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
	dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
	dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
	dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
	dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
	dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
	dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
	dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
	dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
	dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
	dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
	dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
	dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
	dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
	dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
	dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
	dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
	dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
	dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
	dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
	dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
	dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
	dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
	dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
	dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
	dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
	dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
	dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
	dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
	dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
	dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
	dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
	dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
	dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
	dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
	dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
	dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
	dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
	dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
	dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
	dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
	dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
	dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
	dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
	dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
	dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
	dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
	dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
	dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
	dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
}

/**
 * ice_dump_phy_type_high - helper function to dump phy_type_high
 * @hw: pointer to the HW structure
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
		  (unsigned long long)high);

	dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
	dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
	dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
	dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
	dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
}

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}
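
/*
 * Usage sketch (illustrative only, not part of the driver): ice_set_mac_type()
 * is normally one of the first calls made once the PCI IDs have been read
 * into the HW structure, e.g.:
 *
 *	hw->vendor_id = ICE_INTEL_VENDOR_ID;
 *	hw->device_id = ICE_DEV_ID_E810C_SFP;	// read from PCI config space
 *	if (ice_set_mac_type(hw) != ICE_SUCCESS)
 *		return;				// unsupported adapter
 *	// hw->mac_type is now ICE_MAC_E810, ICE_MAC_GENERIC, or
 *	// ICE_MAC_UNKNOWN, and can gate checks such as ice_is_e810() below.
 */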

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
		    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
			return true;
		break;
	case ICE_DEV_ID_E810C_QSFP:
		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
			return true;
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}
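
/*
 * Usage sketch (illustrative only): since a port can report both a LAN and a
 * WoL address, callers size the response buffer for two entries, as
 * ice_init_hw() does later in this file:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *mac_buf = ice_calloc(hw, 2,
 *				   sizeof(struct ice_aqc_manage_mac_read_resp));
 *
 *	if (mac_buf) {
 *		status = ice_aq_manage_mac_read(hw, mac_buf, len, NULL);
 *		ice_free(hw, mac_buf);
 *	}
 */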

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	const char *prefix;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
		prefix = "phy_caps_media";
	else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
		prefix = "phy_caps_no_media";
	else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
		prefix = "phy_caps_active";
	else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
		prefix = "phy_caps_default";
	else
		prefix = "phy_caps_invalid";

	ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
	ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}
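
/*
 * Usage sketch (illustrative only): querying the media-based capabilities,
 * mirroring the call made from ice_init_hw():
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (pcaps) {
 *		status = ice_aq_get_phy_caps(pi, false,
 *					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *					     pcaps, NULL);
 *		// on success, pi->phy.phy_type_low/high reflect the media
 *		ice_free(hw, pcaps);
 *	}
 */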

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 */
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return ICE_ERR_NOT_SUPPORTED;

	if (node_handle)
		*node_handle =
			LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return ICE_SUCCESS;
}

#define MAX_NETLIST_SIZE 10
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Find and return the node handle for a given node type and part number in
 * the netlist. Returns ICE_SUCCESS when the node is found and
 * ICE_ERR_DOES_NOT_EXIST otherwise. If @node_handle is provided, it is set to
 * the handle of the node that was found.
 */
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
		      u16 *node_handle)
{
	struct ice_aqc_get_link_topo cmd;
	u8 rec_node_part_number;
	enum ice_status status;
	u16 rec_node_handle;
	u8 idx;

	for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
		memset(&cmd, 0, sizeof(cmd));

		cmd.addr.topo_params.node_type_ctx =
			(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 &rec_node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number) {
			if (node_handle)
				*node_handle = rec_node_handle;
			return ICE_SUCCESS;
		}
	}

	return ICE_ERR_DOES_NOT_EXIST;
}
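
/*
 * Usage sketch (illustrative only; EXAMPLE_PART_NUMBER is a placeholder, not
 * a constant defined by the driver):
 *
 *	u16 handle;
 *
 *	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
 *				  EXAMPLE_PART_NUMBER,
 *				  &handle) == ICE_SUCCESS) {
 *		// a matching netlist node exists; 'handle' identifies it
 *	}
 */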

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
		 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);

	/* The cage node type can be used to determine whether a cage is
	 * present: the lookup succeeds only if a cage node exists. If the AQC
	 * returns an error (ENOENT), no cage is present, and the connection
	 * type is then backplane or BASE-T.
	 */
	return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL) == ICE_SUCCESS;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be, since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
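
/*
 * Usage sketch (illustrative only): refreshing the cached link state with
 * Link Status Event reporting left disabled:
 *
 *	struct ice_link_status link;
 *
 *	if (ice_aq_get_link_info(pi, false, &link, NULL) == ICE_SUCCESS &&
 *	    (link.link_info & ICE_AQ_LINK_UP)) {
 *		// link is up; link.link_speed holds the negotiated speed
 *	}
 */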

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @auto_drop: Tell HW to drop packets if TC queue is blocked
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
		cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
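
/*
 * Usage sketch (illustrative only): enabling jumbo frames at the MAC without
 * auto-drop, exactly as ice_init_hw() does later in this file:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
 *				    NULL);
 */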

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
			  ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to the switch info struct for which the function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_set_umac_shared
 * @hw: pointer to the hw struct
 *
 * Set boolean flag to allow unicast MAC sharing
 */
void ice_set_umac_shared(struct ice_hw *hw)
{
	hw->umac_shared = true;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;
	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_fwlog_set_support_ena(hw);
	status = ice_fwlog_set(hw, &hw->fwlog_cfg);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
			  status);
	} else {
		if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
			status = ice_fwlog_register(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
					  status);
		} else {
			status = ice_fwlog_unregister(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
					  status);
		}
	}

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */

	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
				    NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
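
/*
 * Usage sketch (illustrative only): ice_init_hw() and ice_deinit_hw() are a
 * matched pair. The unroll labels above run only when init itself fails, so
 * a successful init must eventually be undone by the caller:
 *
 *	if (ice_init_hw(hw) == ICE_SUCCESS) {
 *		// ... normal operation ...
 *		ice_deinit_hw(hw);
 *	}
 */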

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
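
/*
 * Usage sketch (illustrative only): requesting a PF reset during early init,
 * as ice_init_hw() does above:
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 *
 * A CORER or GLOBR request also restores PXE mode, which the caller must
 * clear again with ice_clear_pxe_mode() once the AQ interface is back up.
 */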

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
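
/*
 * Packing example (illustrative only): each ICE_CTX_STORE entry gives a
 * field's width and LSB position within the dense Rx queue context image.
 * For instance, 'qlen' is 13 bits wide starting at bit 89, so ice_set_ctx()
 * packs it into bits 89..101 of ctx_buf, i.e. starting at bit 1 of byte 11:
 *
 *	byte offset = 89 / 8;	// 11
 *	bit shift   = 89 % 8;	// 1
 */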

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
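
/*
 * Usage sketch (illustrative only; the field values and the 128-byte
 * granularity shifts below are assumptions of this sketch, not values taken
 * from this file):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_phys_addr >> 7;	// assumed 128-byte units
 *	rlan_ctx.qlen = ring_desc_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// assumed 128-byte units
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */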

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
				       /* Field			Width   LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
					/* Field		Width   LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_dnl_get_status:
	case ice_aqc_opc_dnl_run:
	case ice_aqc_opc_dnl_call:
	case ice_aqc_opc_dnl_read_sto:
	case ice_aqc_opc_dnl_write_sto:
	case ice_aqc_opc_dnl_set_breakpoints:
	case ice_aqc_opc_dnl_read_log:
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_done_alt_write:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
1671  * Retry sending a FW Admin Queue command to the FW Admin Queue, multiple
1672  * times, if firmware returns the EBUSY AQ error.
1673  */
1674 static enum ice_status
1675 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1676 		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
1677 		      struct ice_sq_cd *cd)
1678 {
1679 	struct ice_aq_desc desc_cpy;
1680 	enum ice_status status;
1681 	bool is_cmd_for_retry;
1682 	u8 *buf_cpy = NULL;
1683 	u8 idx = 0;
1684 	u16 opcode;
1685 
1686 	opcode = LE16_TO_CPU(desc->opcode);
1687 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1688 	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1689 
1690 	if (is_cmd_for_retry) {
1691 		if (buf) {
1692 			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1693 			if (!buf_cpy)
1694 				return ICE_ERR_NO_MEMORY;
1695 		}
1696 
1697 		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1698 			   ICE_NONDMA_TO_NONDMA);
1699 	}
1700 
1701 	do {
1702 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1703 
1704 		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1705 		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1706 			break;
1707 
1708 		if (buf_cpy)
1709 			ice_memcpy(buf, buf_cpy, buf_size,
1710 				   ICE_NONDMA_TO_NONDMA);
1711 
1712 		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1713 			   ICE_NONDMA_TO_NONDMA);
1714 
1715 		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1716 
1717 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1718 
1719 	if (buf_cpy)
1720 		ice_free(hw, buf_cpy);
1721 
1722 	return status;
1723 }
1724 
1725 /**
1726  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1727  * @hw: pointer to the HW struct
1728  * @desc: descriptor describing the command
1729  * @buf: buffer to use for indirect commands (NULL for direct commands)
1730  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1731  * @cd: pointer to command details structure
1732  *
1733  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1734  */
1735 enum ice_status
1736 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1737 		u16 buf_size, struct ice_sq_cd *cd)
1738 {
1739 	return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1740 }
1741 
1742 /**
1743  * ice_aq_get_fw_ver
1744  * @hw: pointer to the HW struct
1745  * @cd: pointer to command details structure or NULL
1746  *
1747  * Get the firmware version (0x0001) from the admin queue commands
1748  */
1749 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1750 {
1751 	struct ice_aqc_get_ver *resp;
1752 	struct ice_aq_desc desc;
1753 	enum ice_status status;
1754 
1755 	resp = &desc.params.get_ver;
1756 
1757 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1758 
1759 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1760 
1761 	if (!status) {
1762 		hw->fw_branch = resp->fw_branch;
1763 		hw->fw_maj_ver = resp->fw_major;
1764 		hw->fw_min_ver = resp->fw_minor;
1765 		hw->fw_patch = resp->fw_patch;
1766 		hw->fw_build = LE32_TO_CPU(resp->fw_build);
1767 		hw->api_branch = resp->api_branch;
1768 		hw->api_maj_ver = resp->api_major;
1769 		hw->api_min_ver = resp->api_minor;
1770 		hw->api_patch = resp->api_patch;
1771 	}
1772 
1773 	return status;
1774 }
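
/*
 * Usage sketch (hypothetical caller): fetch the firmware and API versions
 * and emit them via the driver's debug log. The format string is
 * illustrative.
 */
static void
example_log_fw_ver(struct ice_hw *hw)
{
	if (ice_aq_get_fw_ver(hw, NULL) == ICE_SUCCESS)
		ice_debug(hw, ICE_DBG_INIT,
			  "fw %u.%u.%u build 0x%08x api %u.%u\n",
			  hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
			  hw->fw_build, hw->api_maj_ver, hw->api_min_ver);
}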
1775 
1776 /**
1777  * ice_aq_send_driver_ver
1778  * @hw: pointer to the HW struct
1779  * @dv: driver's major, minor, build, and subbuild versions
1780  * @cd: pointer to command details structure or NULL
1781  *
1782  * Send the driver version (0x0002) to the firmware
1783  */
1784 enum ice_status
1785 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1786 		       struct ice_sq_cd *cd)
1787 {
1788 	struct ice_aqc_driver_ver *cmd;
1789 	struct ice_aq_desc desc;
1790 	u16 len;
1791 
1792 	cmd = &desc.params.driver_ver;
1793 
1794 	if (!dv)
1795 		return ICE_ERR_PARAM;
1796 
1797 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1798 
1799 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1800 	cmd->major_ver = dv->major_ver;
1801 	cmd->minor_ver = dv->minor_ver;
1802 	cmd->build_ver = dv->build_ver;
1803 	cmd->subbuild_ver = dv->subbuild_ver;
1804 
1805 	len = 0;
1806 	while (len < sizeof(dv->driver_string) &&
1807 	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1808 		len++;
1809 
1810 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1811 }
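
/*
 * Usage sketch with hypothetical version numbers: populate ice_driver_ver
 * and report it to firmware. The driver_string copy assumes the field is a
 * fixed-size byte array, as used above.
 */
static enum ice_status
example_report_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv = { 0 };
	const char *str = "example 1.2.3";
	u16 i;

	dv.major_ver = 1;
	dv.minor_ver = 2;
	dv.build_ver = 3;
	dv.subbuild_ver = 4;
	for (i = 0; i < sizeof(dv.driver_string) - 1 && str[i]; i++)
		dv.driver_string[i] = str[i];

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}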
1812 
1813 /**
1814  * ice_aq_q_shutdown
1815  * @hw: pointer to the HW struct
1816  * @unloading: is the driver unloading itself
1817  *
1818  * Tell the Firmware that we're shutting down the AdminQ and whether
1819  * or not the driver is unloading as well (0x0003).
1820  */
1821 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1822 {
1823 	struct ice_aqc_q_shutdown *cmd;
1824 	struct ice_aq_desc desc;
1825 
1826 	cmd = &desc.params.q_shutdown;
1827 
1828 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1829 
1830 	if (unloading)
1831 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1832 
1833 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1834 }
1835 
1836 /**
1837  * ice_aq_req_res
1838  * @hw: pointer to the HW struct
1839  * @res: resource ID
1840  * @access: access type
1841  * @sdp_number: resource number
1842  * @timeout: the maximum time in ms that the driver may hold the resource
1843  * @cd: pointer to command details structure or NULL
1844  *
1845  * Requests common resource using the admin queue commands (0x0008).
1846  * When attempting to acquire the Global Config Lock, the driver can
1847  * learn of three states:
1848  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1849  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1850  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1851  *                          successfully downloaded the package; the driver does
1852  *                          not have to download the package and can continue
1853  *                          loading
1854  *
1855  * Note that if the caller is in an acquire-lock, perform-action, release-lock
1856  * sequence of operations, it is possible that the FW may detect a timeout and issue
1857  * a CORER. In this case, the driver will receive a CORER interrupt and will
1858  * have to determine its cause. The calling thread that is handling this flow
1859  * will likely get an error propagated back to it indicating the Download
1860  * Package, Update Package or the Release Resource AQ commands timed out.
1861  */
1862 static enum ice_status
1863 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1864 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1865 	       struct ice_sq_cd *cd)
1866 {
1867 	struct ice_aqc_req_res *cmd_resp;
1868 	struct ice_aq_desc desc;
1869 	enum ice_status status;
1870 
1871 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1872 
1873 	cmd_resp = &desc.params.res_owner;
1874 
1875 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1876 
1877 	cmd_resp->res_id = CPU_TO_LE16(res);
1878 	cmd_resp->access_type = CPU_TO_LE16(access);
1879 	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1880 	cmd_resp->timeout = CPU_TO_LE32(*timeout);
1881 	*timeout = 0;
1882 
1883 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1884 
1885 	/* The completion specifies, in its Timeout field, the maximum time in
1886 	 * ms that the driver may hold the resource.
1887 	 */
1888 
1889 	/* Global config lock response utilizes an additional status field.
1890 	 *
1891 	 * If the Global config lock resource is held by some other driver, the
1892 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1893 	 * and the timeout field indicates the maximum time the current owner
1894 	 * of the resource has to free it.
1895 	 */
1896 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1897 		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1898 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1899 			return ICE_SUCCESS;
1900 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1901 			   ICE_AQ_RES_GLBL_IN_PROG) {
1902 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1903 			return ICE_ERR_AQ_ERROR;
1904 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1905 			   ICE_AQ_RES_GLBL_DONE) {
1906 			return ICE_ERR_AQ_NO_WORK;
1907 		}
1908 
1909 		/* invalid FW response, force a timeout immediately */
1910 		*timeout = 0;
1911 		return ICE_ERR_AQ_ERROR;
1912 	}
1913 
1914 	/* If the resource is held by some other driver, the command completes
1915 	 * with a busy return value and the timeout field indicates the maximum
1916 	 * time the current owner of the resource has to free it.
1917 	 */
1918 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1919 		*timeout = LE32_TO_CPU(cmd_resp->timeout);
1920 
1921 	return status;
1922 }
1923 
1924 /**
1925  * ice_aq_release_res
1926  * @hw: pointer to the HW struct
1927  * @res: resource ID
1928  * @sdp_number: resource number
1929  * @cd: pointer to command details structure or NULL
1930  *
1931  * release common resource using the admin queue commands (0x0009)
1932  */
1933 static enum ice_status
1934 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1935 		   struct ice_sq_cd *cd)
1936 {
1937 	struct ice_aqc_req_res *cmd;
1938 	struct ice_aq_desc desc;
1939 
1940 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1941 
1942 	cmd = &desc.params.res_owner;
1943 
1944 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1945 
1946 	cmd->res_id = CPU_TO_LE16(res);
1947 	cmd->res_number = CPU_TO_LE32(sdp_number);
1948 
1949 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1950 }
1951 
1952 /**
1953  * ice_acquire_res
1954  * @hw: pointer to the HW structure
1955  * @res: resource ID
1956  * @access: access type (read or write)
1957  * @timeout: timeout in milliseconds
1958  *
1959  * This function will attempt to acquire the ownership of a resource.
1960  */
1961 enum ice_status
1962 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1963 		enum ice_aq_res_access_type access, u32 timeout)
1964 {
1965 #define ICE_RES_POLLING_DELAY_MS	10
1966 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1967 	u32 time_left = timeout;
1968 	enum ice_status status;
1969 
1970 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1971 
1972 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1973 
1974 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1975 	 * previously acquired the resource and performed any necessary updates;
1976 	 * in this case the caller does not obtain the resource and has no
1977 	 * further work to do.
1978 	 */
1979 	if (status == ICE_ERR_AQ_NO_WORK)
1980 		goto ice_acquire_res_exit;
1981 
1982 	if (status)
1983 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1984 
1985 	/* If necessary, poll until the current lock owner times out */
1986 	timeout = time_left;
1987 	while (status && timeout && time_left) {
1988 		ice_msec_delay(delay, true);
1989 		timeout = (timeout > delay) ? timeout - delay : 0;
1990 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1991 
1992 		if (status == ICE_ERR_AQ_NO_WORK)
1993 			/* lock free, but no work to do */
1994 			break;
1995 
1996 		if (!status)
1997 			/* lock acquired */
1998 			break;
1999 	}
2000 	if (status && status != ICE_ERR_AQ_NO_WORK)
2001 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2002 
2003 ice_acquire_res_exit:
2004 	if (status == ICE_ERR_AQ_NO_WORK) {
2005 		if (access == ICE_RES_WRITE)
2006 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2007 		else
2008 			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2009 	}
2010 	return status;
2011 }
2012 
2013 /**
2014  * ice_release_res
2015  * @hw: pointer to the HW structure
2016  * @res: resource ID
2017  *
2018  * This function will release a resource using the proper Admin Command.
2019  */
2020 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2021 {
2022 	enum ice_status status;
2023 	u32 total_delay = 0;
2024 
2025 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2026 
2027 	status = ice_aq_release_res(hw, res, 0, NULL);
2028 
2029 	/* there are some rare cases when trying to release the resource
2030 	 * results in an admin queue timeout, so handle them correctly
2031 	 */
2032 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
2033 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
2034 		ice_msec_delay(1, true);
2035 		status = ice_aq_release_res(hw, res, 0, NULL);
2036 		total_delay++;
2037 	}
2038 }
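
/*
 * Usage sketch of the acquire/act/release flow described above, using the
 * Global Config Lock. The 3000 ms timeout is an assumed value; the
 * ICE_ERR_AQ_NO_WORK case means another PF already downloaded the package.
 */
static enum ice_status
example_with_global_cfg_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 3000);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* nothing left for this PF to do */
	if (status)
		return status;		/* lock not acquired; fail the load */

	/* ... download or update the package while holding the lock ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return ICE_SUCCESS;
}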
2039 
2040 /**
2041  * ice_aq_alloc_free_res - command to allocate/free resources
2042  * @hw: pointer to the HW struct
2043  * @num_entries: number of resource entries in buffer
2044  * @buf: Indirect buffer to hold data parameters and response
2045  * @buf_size: size of buffer for indirect commands
2046  * @opc: pass in the command opcode
2047  * @cd: pointer to command details structure or NULL
2048  *
2049  * Helper function to allocate/free resources using the admin queue commands
2050  */
2051 enum ice_status
2052 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2053 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2054 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2055 {
2056 	struct ice_aqc_alloc_free_res_cmd *cmd;
2057 	struct ice_aq_desc desc;
2058 
2059 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2060 
2061 	cmd = &desc.params.sw_res_ctrl;
2062 
2063 	if (!buf)
2064 		return ICE_ERR_PARAM;
2065 
2066 	if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2067 		return ICE_ERR_PARAM;
2068 
2069 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2070 
2071 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2072 
2073 	cmd->num_entries = CPU_TO_LE16(num_entries);
2074 
2075 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2076 }
2077 
2078 /**
2079  * ice_alloc_hw_res - allocate resource
2080  * @hw: pointer to the HW struct
2081  * @type: type of resource
2082  * @num: number of resources to allocate
2083  * @btm: allocate from bottom
2084  * @res: pointer to array that will receive the resources
2085  */
2086 enum ice_status
2087 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2088 {
2089 	struct ice_aqc_alloc_free_res_elem *buf;
2090 	enum ice_status status;
2091 	u16 buf_len;
2092 
2093 	buf_len = ice_struct_size(buf, elem, num);
2094 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2095 	if (!buf)
2096 		return ICE_ERR_NO_MEMORY;
2097 
2098 	/* Prepare buffer to allocate resource. */
2099 	buf->num_elems = CPU_TO_LE16(num);
2100 	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2101 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2102 	if (btm)
2103 		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2104 
2105 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2106 				       ice_aqc_opc_alloc_res, NULL);
2107 	if (status)
2108 		goto ice_alloc_res_exit;
2109 
2110 	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2111 		   ICE_NONDMA_TO_NONDMA);
2112 
2113 ice_alloc_res_exit:
2114 	ice_free(hw, buf);
2115 	return status;
2116 }
2117 
2118 /**
2119  * ice_free_hw_res - free allocated HW resource
2120  * @hw: pointer to the HW struct
2121  * @type: type of resource to free
2122  * @num: number of resources
2123  * @res: pointer to array that contains the resources to free
2124  */
2125 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2126 {
2127 	struct ice_aqc_alloc_free_res_elem *buf;
2128 	enum ice_status status;
2129 	u16 buf_len;
2130 
2131 	buf_len = ice_struct_size(buf, elem, num);
2132 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2133 	if (!buf)
2134 		return ICE_ERR_NO_MEMORY;
2135 
2136 	/* Prepare buffer to free resource. */
2137 	buf->num_elems = CPU_TO_LE16(num);
2138 	buf->res_type = CPU_TO_LE16(type);
2139 	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2140 		   ICE_NONDMA_TO_NONDMA);
2141 
2142 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2143 				       ice_aqc_opc_free_res, NULL);
2144 	if (status)
2145 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2146 
2147 	ice_free(hw, buf);
2148 	return status;
2149 }
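
/*
 * Usage sketch: allocate two entries of a caller-supplied resource type and
 * free them again. The resource type is whatever ICE_AQC_RES_TYPE_* value
 * the caller needs; error handling is minimal for brevity.
 */
static enum ice_status
example_alloc_then_free(struct ice_hw *hw, u16 res_type)
{
	u16 res_ids[2] = { 0 };
	enum ice_status status;

	status = ice_alloc_hw_res(hw, res_type, 2, false, res_ids);
	if (status)
		return status;

	/* ... program hardware using res_ids[0] and res_ids[1] ... */

	return ice_free_hw_res(hw, res_type, 2, res_ids);
}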
2150 
2151 /**
2152  * ice_get_num_per_func - determine number of resources per PF
2153  * @hw: pointer to the HW structure
2154  * @max: value to be evenly split between each PF
2155  *
2156  * Determine the number of valid functions by going through the bitmap returned
2157  * from parsing capabilities and use this to calculate the number of resources
2158  * per PF based on the max value passed in.
2159  */
2160 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2161 {
2162 	u8 funcs;
2163 
2164 #define ICE_CAPS_VALID_FUNCS_M	0xFF
2165 	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2166 			     ICE_CAPS_VALID_FUNCS_M);
2167 
2168 	if (!funcs)
2169 		return 0;
2170 
2171 	return max / funcs;
2172 }
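
/*
 * Worked example (illustrative numbers): with valid_functions = 0x5, PFs 0
 * and 2 are present, so funcs = 2; a max of 768 VSIs then yields 384 per PF.
 */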
2173 
2174 /**
2175  * ice_print_led_caps - print LED capabilities
2176  * @hw: pointer to the ice_hw instance
2177  * @caps: pointer to common caps instance
2178  * @prefix: string to prefix when printing
2179  * @dbg: set to indicate debug print
2180  */
2181 static void
2182 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2183 		   char const *prefix, bool dbg)
2184 {
2185 	u8 i;
2186 
2187 	if (dbg)
2188 		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
2189 			  caps->led_pin_num);
2190 	else
2191 		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
2192 			 caps->led_pin_num);
2193 
2194 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2195 		if (!caps->led[i])
2196 			continue;
2197 
2198 		if (dbg)
2199 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
2200 				  prefix, i, caps->led[i]);
2201 		else
2202 			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
2203 				 caps->led[i]);
2204 	}
2205 }
2206 
2207 /**
2208  * ice_print_sdp_caps - print SDP capabilities
2209  * @hw: pointer to the ice_hw instance
2210  * @caps: pointer to common caps instance
2211  * @prefix: string to prefix when printing
2212  * @dbg: set to indicate debug print
2213  */
2214 static void
2215 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2216 		   char const *prefix, bool dbg)
2217 {
2218 	u8 i;
2219 
2220 	if (dbg)
2221 		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
2222 			  caps->sdp_pin_num);
2223 	else
2224 		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
2225 			 caps->sdp_pin_num);
2226 
2227 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2228 		if (!caps->sdp[i])
2229 			continue;
2230 
2231 		if (dbg)
2232 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
2233 				  prefix, i, caps->sdp[i]);
2234 		else
2235 			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
2236 				 i, caps->sdp[i]);
2237 	}
2238 }
2239 
2240 /**
2241  * ice_parse_common_caps - parse common device/function capabilities
2242  * @hw: pointer to the HW struct
2243  * @caps: pointer to common capabilities structure
2244  * @elem: the capability element to parse
2245  * @prefix: message prefix for tracing capabilities
2246  *
2247  * Given a capability element, extract relevant details into the common
2248  * capability structure.
2249  *
2250  * Returns: true if the capability matches one of the common capability ids,
2251  * false otherwise.
2252  */
2253 static bool
2254 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2255 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
2256 {
2257 	u32 logical_id = LE32_TO_CPU(elem->logical_id);
2258 	u32 phys_id = LE32_TO_CPU(elem->phys_id);
2259 	u32 number = LE32_TO_CPU(elem->number);
2260 	u16 cap = LE16_TO_CPU(elem->cap);
2261 	bool found = true;
2262 
2263 	switch (cap) {
2264 	case ICE_AQC_CAPS_SWITCHING_MODE:
2265 		caps->switching_mode = number;
2266 		ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
2267 			  caps->switching_mode);
2268 		break;
2269 	case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2270 		caps->mgmt_mode = number;
2271 		caps->mgmt_protocols_mctp = logical_id;
2272 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
2273 			  caps->mgmt_mode);
2274 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
2275 			  caps->mgmt_protocols_mctp);
2276 		break;
2277 	case ICE_AQC_CAPS_OS2BMC:
2278 		caps->os2bmc = number;
2279 		ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
2280 		break;
2281 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
2282 		caps->valid_functions = number;
2283 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2284 			  caps->valid_functions);
2285 		break;
2286 	case ICE_AQC_CAPS_SRIOV:
2287 		caps->sr_iov_1_1 = (number == 1);
2288 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2289 			  caps->sr_iov_1_1);
2290 		break;
2291 	case ICE_AQC_CAPS_802_1QBG:
2292 		caps->evb_802_1_qbg = (number == 1);
2293 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
2294 		break;
2295 	case ICE_AQC_CAPS_802_1BR:
2296 		caps->evb_802_1_qbh = (number == 1);
2297 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
2298 		break;
2299 	case ICE_AQC_CAPS_DCB:
2300 		caps->dcb = (number == 1);
2301 		caps->active_tc_bitmap = logical_id;
2302 		caps->maxtc = phys_id;
2303 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2304 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2305 			  caps->active_tc_bitmap);
2306 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2307 		break;
2308 	case ICE_AQC_CAPS_ISCSI:
2309 		caps->iscsi = (number == 1);
2310 		ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
2311 		break;
2312 	case ICE_AQC_CAPS_RSS:
2313 		caps->rss_table_size = number;
2314 		caps->rss_table_entry_width = logical_id;
2315 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2316 			  caps->rss_table_size);
2317 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2318 			  caps->rss_table_entry_width);
2319 		break;
2320 	case ICE_AQC_CAPS_RXQS:
2321 		caps->num_rxq = number;
2322 		caps->rxq_first_id = phys_id;
2323 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2324 			  caps->num_rxq);
2325 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2326 			  caps->rxq_first_id);
2327 		break;
2328 	case ICE_AQC_CAPS_TXQS:
2329 		caps->num_txq = number;
2330 		caps->txq_first_id = phys_id;
2331 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2332 			  caps->num_txq);
2333 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2334 			  caps->txq_first_id);
2335 		break;
2336 	case ICE_AQC_CAPS_MSIX:
2337 		caps->num_msix_vectors = number;
2338 		caps->msix_vector_first_id = phys_id;
2339 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2340 			  caps->num_msix_vectors);
2341 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2342 			  caps->msix_vector_first_id);
2343 		break;
2344 	case ICE_AQC_CAPS_NVM_VER:
2345 		break;
2346 	case ICE_AQC_CAPS_NVM_MGMT:
2347 		caps->sec_rev_disabled =
2348 			(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2349 			true : false;
2350 		ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2351 			  caps->sec_rev_disabled);
2352 		caps->update_disabled =
2353 			(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2354 			true : false;
2355 		ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2356 			  caps->update_disabled);
2357 		caps->nvm_unified_update =
2358 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2359 			true : false;
2360 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2361 			  caps->nvm_unified_update);
2362 		break;
2363 	case ICE_AQC_CAPS_CEM:
2364 		caps->mgmt_cem = (number == 1);
2365 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2366 			  caps->mgmt_cem);
2367 		break;
2368 	case ICE_AQC_CAPS_IWARP:
2369 		caps->iwarp = (number == 1);
2370 		ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
2371 		break;
2372 	case ICE_AQC_CAPS_LED:
2373 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2374 			caps->led[phys_id] = true;
2375 			caps->led_pin_num++;
2376 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2377 		}
2378 		break;
2379 	case ICE_AQC_CAPS_SDP:
2380 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2381 			caps->sdp[phys_id] = true;
2382 			caps->sdp_pin_num++;
2383 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2384 		}
2385 		break;
2386 	case ICE_AQC_CAPS_WR_CSR_PROT:
2387 		caps->wr_csr_prot = number;
2388 		caps->wr_csr_prot |= (u64)logical_id << 32;
2389 		ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2390 			  (unsigned long long)caps->wr_csr_prot);
2391 		break;
2392 	case ICE_AQC_CAPS_WOL_PROXY:
2393 		caps->num_wol_proxy_fltr = number;
2394 		caps->wol_proxy_vsi_seid = logical_id;
2395 		caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2396 		caps->acpi_prog_mthd = !!(phys_id &
2397 					  ICE_ACPI_PROG_MTHD_M);
2398 		caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2399 		ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2400 			  caps->num_wol_proxy_fltr);
2401 		ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2402 			  caps->wol_proxy_vsi_seid);
2403 		ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
2404 			  prefix, caps->apm_wol_support);
2405 		break;
2406 	case ICE_AQC_CAPS_MAX_MTU:
2407 		caps->max_mtu = number;
2408 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2409 			  prefix, caps->max_mtu);
2410 		break;
2411 	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2412 		caps->pcie_reset_avoidance = (number > 0);
2413 		ice_debug(hw, ICE_DBG_INIT,
2414 			  "%s: pcie_reset_avoidance = %d\n", prefix,
2415 			  caps->pcie_reset_avoidance);
2416 		break;
2417 	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2418 		caps->reset_restrict_support = (number == 1);
2419 		ice_debug(hw, ICE_DBG_INIT,
2420 			  "%s: reset_restrict_support = %d\n", prefix,
2421 			  caps->reset_restrict_support);
2422 		break;
2423 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2424 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2425 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2426 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2427 	{
2428 		u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
2429 
2430 		caps->ext_topo_dev_img_ver_high[index] = number;
2431 		caps->ext_topo_dev_img_ver_low[index] = logical_id;
2432 		caps->ext_topo_dev_img_part_num[index] =
2433 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2434 			ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2435 		caps->ext_topo_dev_img_load_en[index] =
2436 			(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2437 		caps->ext_topo_dev_img_prog_en[index] =
2438 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2439 		ice_debug(hw, ICE_DBG_INIT,
2440 			  "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2441 			  prefix, index,
2442 			  caps->ext_topo_dev_img_ver_high[index]);
2443 		ice_debug(hw, ICE_DBG_INIT,
2444 			  "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2445 			  prefix, index,
2446 			  caps->ext_topo_dev_img_ver_low[index]);
2447 		ice_debug(hw, ICE_DBG_INIT,
2448 			  "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2449 			  prefix, index,
2450 			  caps->ext_topo_dev_img_part_num[index]);
2451 		ice_debug(hw, ICE_DBG_INIT,
2452 			  "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2453 			  prefix, index,
2454 			  caps->ext_topo_dev_img_load_en[index]);
2455 		ice_debug(hw, ICE_DBG_INIT,
2456 			  "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2457 			  prefix, index,
2458 			  caps->ext_topo_dev_img_prog_en[index]);
2459 		break;
2460 	}
2461 	default:
2462 		/* Not one of the recognized common capabilities */
2463 		found = false;
2464 	}
2465 
2466 	return found;
2467 }
2468 
2469 /**
2470  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2471  * @hw: pointer to the HW structure
2472  * @caps: pointer to capabilities structure to fix
2473  *
2474  * Re-calculate the capabilities that are dependent on the number of physical
2475  * ports; i.e. some features are not supported or function differently on
2476  * devices with more than 4 ports.
2477  */
2478 static void
2479 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2480 {
2481 	/* This assumes device capabilities are always scanned before function
2482 	 * capabilities during the initialization flow.
2483 	 */
2484 	if (hw->dev_caps.num_funcs > 4) {
2485 		/* Max 4 TCs per port */
2486 		caps->maxtc = 4;
2487 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2488 			  caps->maxtc);
2489 		if (caps->iwarp) {
2490 			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2491 			caps->iwarp = 0;
2492 		}
2493 
2494 		/* print message only when processing device capabilities
2495 		 * during initialization.
2496 		 */
2497 		if (caps == &hw->dev_caps.common_cap)
2498 			ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2499 	}
2500 }
2501 
2502 /**
2503  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2504  * @hw: pointer to the HW struct
2505  * @func_p: pointer to function capabilities structure
2506  * @cap: pointer to the capability element to parse
2507  *
2508  * Extract function capabilities for ICE_AQC_CAPS_VF.
2509  */
2510 static void
2511 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2512 		       struct ice_aqc_list_caps_elem *cap)
2513 {
2514 	u32 number = LE32_TO_CPU(cap->number);
2515 	u32 logical_id = LE32_TO_CPU(cap->logical_id);
2516 
2517 	func_p->num_allocd_vfs = number;
2518 	func_p->vf_base_id = logical_id;
2519 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2520 		  func_p->num_allocd_vfs);
2521 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2522 		  func_p->vf_base_id);
2523 }
2524 
2525 /**
2526  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2527  * @hw: pointer to the HW struct
2528  * @func_p: pointer to function capabilities structure
2529  * @cap: pointer to the capability element to parse
2530  *
2531  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2532  */
2533 static void
2534 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2535 			struct ice_aqc_list_caps_elem *cap)
2536 {
2537 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2538 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2539 		  LE32_TO_CPU(cap->number));
2540 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2541 		  func_p->guar_num_vsi);
2542 }
2543 
2544 /**
2545  * ice_parse_func_caps - Parse function capabilities
2546  * @hw: pointer to the HW struct
2547  * @func_p: pointer to function capabilities structure
2548  * @buf: buffer containing the function capability records
2549  * @cap_count: the number of capabilities
2550  *
2551  * Helper function to parse function (0x000A) capabilities list. For
2552  * capabilities shared between device and function, this relies on
2553  * ice_parse_common_caps.
2554  *
2555  * Loop through the list of provided capabilities and extract the relevant
2556  * data into the function capabilities structure.
2557  */
2558 static void
2559 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2560 		    void *buf, u32 cap_count)
2561 {
2562 	struct ice_aqc_list_caps_elem *cap_resp;
2563 	u32 i;
2564 
2565 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2566 
2567 	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2568 
2569 	for (i = 0; i < cap_count; i++) {
2570 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2571 		bool found;
2572 
2573 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2574 					      &cap_resp[i], "func caps");
2575 
2576 		switch (cap) {
2577 		case ICE_AQC_CAPS_VF:
2578 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2579 			break;
2580 		case ICE_AQC_CAPS_VSI:
2581 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2582 			break;
2583 		default:
2584 			/* Don't list common capabilities as unknown */
2585 			if (!found)
2586 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2587 					  i, cap);
2588 			break;
2589 		}
2590 	}
2591 
2592 	ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2593 	ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2594 
2595 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2596 }
2597 
2598 /**
2599  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2600  * @hw: pointer to the HW struct
2601  * @dev_p: pointer to device capabilities structure
2602  * @cap: capability element to parse
2603  *
2604  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2605  */
2606 static void
2607 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2608 			      struct ice_aqc_list_caps_elem *cap)
2609 {
2610 	u32 number = LE32_TO_CPU(cap->number);
2611 
2612 	dev_p->num_funcs = ice_hweight32(number);
2613 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2614 		  dev_p->num_funcs);
2616 }
2617 
2618 /**
2619  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2620  * @hw: pointer to the HW struct
2621  * @dev_p: pointer to device capabilities structure
2622  * @cap: capability element to parse
2623  *
2624  * Parse ICE_AQC_CAPS_VF for device capabilities.
2625  */
2626 static void
2627 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2628 		      struct ice_aqc_list_caps_elem *cap)
2629 {
2630 	u32 number = LE32_TO_CPU(cap->number);
2631 
2632 	dev_p->num_vfs_exposed = number;
2633 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2634 		  dev_p->num_vfs_exposed);
2635 }
2636 
2637 /**
2638  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2639  * @hw: pointer to the HW struct
2640  * @dev_p: pointer to device capabilities structure
2641  * @cap: capability element to parse
2642  *
2643  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2644  */
2645 static void
2646 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2647 		       struct ice_aqc_list_caps_elem *cap)
2648 {
2649 	u32 number = LE32_TO_CPU(cap->number);
2650 
2651 	dev_p->num_vsi_allocd_to_host = number;
2652 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2653 		  dev_p->num_vsi_allocd_to_host);
2654 }
2655 
2656 /**
2657  * ice_parse_dev_caps - Parse device capabilities
2658  * @hw: pointer to the HW struct
2659  * @dev_p: pointer to device capabilities structure
2660  * @buf: buffer containing the device capability records
2661  * @cap_count: the number of capabilities
2662  *
2663  * Helper function to parse the device (0x000B) capabilities list. For
2664  * capabilities shared between device and function, this relies on
2665  * ice_parse_common_caps.
2666  *
2667  * Loop through the list of provided capabilities and extract the relevant
2668  * data into the device capabilities structure.
2669  */
2670 static void
2671 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2672 		   void *buf, u32 cap_count)
2673 {
2674 	struct ice_aqc_list_caps_elem *cap_resp;
2675 	u32 i;
2676 
2677 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2678 
2679 	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2680 
2681 	for (i = 0; i < cap_count; i++) {
2682 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2683 		bool found;
2684 
2685 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2686 					      &cap_resp[i], "dev caps");
2687 
2688 		switch (cap) {
2689 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2690 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2691 			break;
2692 		case ICE_AQC_CAPS_VF:
2693 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2694 			break;
2695 		case ICE_AQC_CAPS_VSI:
2696 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2697 			break;
2698 		default:
2699 			/* Don't list common capabilities as unknown */
2700 			if (!found)
2701 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2702 					  i, cap);
2703 			break;
2704 		}
2705 	}
2706 
2707 	ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2708 	ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2709 
2710 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2711 }
2712 
2713 /**
2714  * ice_aq_list_caps - query function/device capabilities
2715  * @hw: pointer to the HW struct
2716  * @buf: a buffer to hold the capabilities
2717  * @buf_size: size of the buffer
2718  * @cap_count: if not NULL, set to the number of capabilities reported
2719  * @opc: capabilities type to discover, device or function
2720  * @cd: pointer to command details structure or NULL
2721  *
2722  * Get the function (0x000A) or device (0x000B) capabilities description from
2723  * firmware and store it in the buffer.
2724  *
2725  * If the cap_count pointer is not NULL, then it is set to the number of
2726  * capabilities firmware will report. Note that if the buffer size is too
2727  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2728  * cap_count will still be updated in this case. It is recommended that the
2729  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2730  * firmware could return) to avoid this.
2731  */
2732 static enum ice_status
2733 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2734 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2735 {
2736 	struct ice_aqc_list_caps *cmd;
2737 	struct ice_aq_desc desc;
2738 	enum ice_status status;
2739 
2740 	cmd = &desc.params.get_cap;
2741 
2742 	if (opc != ice_aqc_opc_list_func_caps &&
2743 	    opc != ice_aqc_opc_list_dev_caps)
2744 		return ICE_ERR_PARAM;
2745 
2746 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2747 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2748 
2749 	if (cap_count)
2750 		*cap_count = LE32_TO_CPU(cmd->count);
2751 
2752 	return status;
2753 }
2754 
2755 /**
2756  * ice_discover_dev_caps - Read and extract device capabilities
2757  * @hw: pointer to the hardware structure
2758  * @dev_caps: pointer to device capabilities structure
2759  *
2760  * Read the device capabilities and extract them into the dev_caps structure
2761  * for later use.
2762  */
2763 static enum ice_status
2764 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2765 {
2766 	enum ice_status status;
2767 	u32 cap_count = 0;
2768 	void *cbuf;
2769 
2770 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2771 	if (!cbuf)
2772 		return ICE_ERR_NO_MEMORY;
2773 
2774 	/* Although the driver doesn't know the number of capabilities the
2775 	 * device will return, we can simply send a 4KB buffer, the maximum
2776 	 * possible size that firmware can return.
2777 	 */
2778 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2779 
2780 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2781 				  ice_aqc_opc_list_dev_caps, NULL);
2782 	if (!status)
2783 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2784 	ice_free(hw, cbuf);
2785 
2786 	return status;
2787 }
2788 
2789 /**
2790  * ice_discover_func_caps - Read and extract function capabilities
2791  * @hw: pointer to the hardware structure
2792  * @func_caps: pointer to function capabilities structure
2793  *
2794  * Read the function capabilities and extract them into the func_caps structure
2795  * for later use.
2796  */
2797 static enum ice_status
2798 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2799 {
2800 	enum ice_status status;
2801 	u32 cap_count = 0;
2802 	void *cbuf;
2803 
2804 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2805 	if (!cbuf)
2806 		return ICE_ERR_NO_MEMORY;
2807 
2808 	/* Although the driver doesn't know the number of capabilities the
2809 	 * device will return, we can simply send a 4KB buffer, the maximum
2810 	 * possible size that firmware can return.
2811 	 */
2812 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2813 
2814 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2815 				  ice_aqc_opc_list_func_caps, NULL);
2816 	if (!status)
2817 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2818 	ice_free(hw, cbuf);
2819 
2820 	return status;
2821 }
2822 
2823 /**
2824  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2825  * @hw: pointer to the hardware structure
2826  */
2827 void ice_set_safe_mode_caps(struct ice_hw *hw)
2828 {
2829 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2830 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2831 	struct ice_hw_common_caps cached_caps;
2832 	u32 num_funcs;
2833 
2834 	/* cache some func_caps values that should be restored after memset */
2835 	cached_caps = func_caps->common_cap;
2836 
2837 	/* unset func capabilities */
2838 	memset(func_caps, 0, sizeof(*func_caps));
2839 
2840 #define ICE_RESTORE_FUNC_CAP(name) \
2841 	func_caps->common_cap.name = cached_caps.name
2842 
2843 	/* restore cached values */
2844 	ICE_RESTORE_FUNC_CAP(valid_functions);
2845 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2846 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2847 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2848 	ICE_RESTORE_FUNC_CAP(max_mtu);
2849 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2850 
2851 	/* one Tx and one Rx queue in safe mode */
2852 	func_caps->common_cap.num_rxq = 1;
2853 	func_caps->common_cap.num_txq = 1;
2854 
2855 	/* two MSIX vectors, one for traffic and one for misc causes */
2856 	func_caps->common_cap.num_msix_vectors = 2;
2857 	func_caps->guar_num_vsi = 1;
2858 
2859 	/* cache some dev_caps values that should be restored after memset */
2860 	cached_caps = dev_caps->common_cap;
2861 	num_funcs = dev_caps->num_funcs;
2862 
2863 	/* unset dev capabilities */
2864 	memset(dev_caps, 0, sizeof(*dev_caps));
2865 
2866 #define ICE_RESTORE_DEV_CAP(name) \
2867 	dev_caps->common_cap.name = cached_caps.name
2868 
2869 	/* restore cached values */
2870 	ICE_RESTORE_DEV_CAP(valid_functions);
2871 	ICE_RESTORE_DEV_CAP(txq_first_id);
2872 	ICE_RESTORE_DEV_CAP(rxq_first_id);
2873 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2874 	ICE_RESTORE_DEV_CAP(max_mtu);
2875 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2876 	dev_caps->num_funcs = num_funcs;
2877 
2878 	/* one Tx and one Rx queue per function in safe mode */
2879 	dev_caps->common_cap.num_rxq = num_funcs;
2880 	dev_caps->common_cap.num_txq = num_funcs;
2881 
2882 	/* two MSIX vectors per function */
2883 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2884 }
2885 
2886 /**
2887  * ice_get_caps - get info about the HW
2888  * @hw: pointer to the hardware structure
2889  */
2890 enum ice_status ice_get_caps(struct ice_hw *hw)
2891 {
2892 	enum ice_status status;
2893 
2894 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2895 	if (status)
2896 		return status;
2897 
2898 	return ice_discover_func_caps(hw, &hw->func_caps);
2899 }
2900 
2901 /**
2902  * ice_aq_manage_mac_write - manage MAC address write command
2903  * @hw: pointer to the HW struct
2904  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2905  * @flags: flags to control write behavior
2906  * @cd: pointer to command details structure or NULL
2907  *
2908  * This function is used to write MAC address to the NVM (0x0108).
2909  */
2910 enum ice_status
2911 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2912 			struct ice_sq_cd *cd)
2913 {
2914 	struct ice_aqc_manage_mac_write *cmd;
2915 	struct ice_aq_desc desc;
2916 
2917 	cmd = &desc.params.mac_write;
2918 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2919 
2920 	cmd->flags = flags;
2921 	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2922 
2923 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2924 }
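
/*
 * Usage sketch: persist a locally administered address. The flag choice is
 * an assumption here; the exact ICE_AQC_MAN_MAC_* write flags live in
 * ice_adminq_cmd.h.
 */
static enum ice_status
example_write_laa(struct ice_hw *hw, const u8 *laa)
{
	return ice_aq_manage_mac_write(hw, laa, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL,
				       NULL);
}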
2925 
2926 /**
2927  * ice_aq_clear_pxe_mode
2928  * @hw: pointer to the HW struct
2929  *
2930  * Tell the firmware that the driver is taking over from PXE (0x0110).
2931  */
2932 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2933 {
2934 	struct ice_aq_desc desc;
2935 
2936 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2937 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2938 
2939 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2940 }
2941 
2942 /**
2943  * ice_clear_pxe_mode - clear pxe operations mode
2944  * @hw: pointer to the HW struct
2945  *
2946  * Make sure all PXE mode settings are cleared, including things
2947  * like descriptor fetch/write-back mode.
2948  */
2949 void ice_clear_pxe_mode(struct ice_hw *hw)
2950 {
2951 	if (ice_check_sq_alive(hw, &hw->adminq))
2952 		ice_aq_clear_pxe_mode(hw);
2953 }
2954 
2955 /**
2956  * ice_aq_set_port_params - set physical port parameters.
2957  * @pi: pointer to the port info struct
2958  * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2959  * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
2960  * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
2961  * @double_vlan: if set double VLAN is enabled
2962  * @cd: pointer to command details structure or NULL
2963  *
2964  * Set Physical port parameters (0x0203)
2965  */
2966 enum ice_status
2967 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2968 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2969 		       struct ice_sq_cd *cd)
2971 {
2972 	struct ice_aqc_set_port_params *cmd;
2973 	struct ice_hw *hw = pi->hw;
2974 	struct ice_aq_desc desc;
2975 	u16 cmd_flags = 0;
2976 
2977 	cmd = &desc.params.set_port_params;
2978 
2979 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2980 	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2981 	if (save_bad_pac)
2982 		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2983 	if (pad_short_pac)
2984 		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2985 	if (double_vlan)
2986 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2987 	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2988 
2989 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2990 }
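
/*
 * Usage sketch: enable double VLAN mode on a port while leaving bad-frame
 * forwarding and short-packet padding off; the bad_frame_vsi of 0 is a
 * placeholder since no bad-frame VSI is used here.
 */
static enum ice_status
example_enable_double_vlan(struct ice_port_info *pi)
{
	return ice_aq_set_port_params(pi, 0, false, false, true, NULL);
}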
2991 
2992 /**
2993  * ice_is_100m_speed_supported
2994  * @hw: pointer to the HW struct
2995  *
2996  * Returns true if 100M speeds are supported by the device,
2997  * false otherwise.
2998  */
2999 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3000 {
3001 	switch (hw->device_id) {
3002 	case ICE_DEV_ID_E822C_10G_BASE_T:
3003 	case ICE_DEV_ID_E822C_SGMII:
3004 	case ICE_DEV_ID_E822L_10G_BASE_T:
3005 	case ICE_DEV_ID_E822L_SGMII:
3006 	case ICE_DEV_ID_E823L_10G_BASE_T:
3007 	case ICE_DEV_ID_E823L_1GBE:
3008 		return true;
3009 	default:
3010 		return false;
3011 	}
3012 }
3013 
3014 /**
3015  * ice_get_link_speed_based_on_phy_type - returns link speed
3016  * @phy_type_low: lower part of phy_type
3017  * @phy_type_high: higher part of phy_type
3018  *
3019  * This helper function will convert an entry in PHY type structure
3020  * [phy_type_low, phy_type_high] to its corresponding link speed.
3021  * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
3022  * should be set, as this function converts a single PHY type to its
3023  * speed.
3024  * If no bit is set, ICE_LINK_SPEED_UNKNOWN is returned.
3025  * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN is returned.
3026  */
3027 static u16
3028 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3029 {
3030 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3031 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3032 
3033 	switch (phy_type_low) {
3034 	case ICE_PHY_TYPE_LOW_100BASE_TX:
3035 	case ICE_PHY_TYPE_LOW_100M_SGMII:
3036 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3037 		break;
3038 	case ICE_PHY_TYPE_LOW_1000BASE_T:
3039 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
3040 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
3041 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
3042 	case ICE_PHY_TYPE_LOW_1G_SGMII:
3043 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3044 		break;
3045 	case ICE_PHY_TYPE_LOW_2500BASE_T:
3046 	case ICE_PHY_TYPE_LOW_2500BASE_X:
3047 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
3048 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3049 		break;
3050 	case ICE_PHY_TYPE_LOW_5GBASE_T:
3051 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
3052 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3053 		break;
3054 	case ICE_PHY_TYPE_LOW_10GBASE_T:
3055 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3056 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
3057 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
3058 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3059 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3060 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3061 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3062 		break;
3063 	case ICE_PHY_TYPE_LOW_25GBASE_T:
3064 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
3065 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3066 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3067 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
3068 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
3069 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
3070 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3071 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3072 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3073 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3074 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3075 		break;
3076 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3077 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3078 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3079 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3080 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3081 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
3082 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3083 		break;
3084 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3085 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3086 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3087 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3088 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3089 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
3090 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3091 	case ICE_PHY_TYPE_LOW_50G_AUI2:
3092 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
3093 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
3094 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
3095 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
3096 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3097 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3098 	case ICE_PHY_TYPE_LOW_50G_AUI1:
3099 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3100 		break;
3101 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3102 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3103 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3104 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3105 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3106 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
3107 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3108 	case ICE_PHY_TYPE_LOW_100G_AUI4:
3109 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3110 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3111 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3112 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3113 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
3114 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3115 		break;
3116 	default:
3117 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3118 		break;
3119 	}
3120 
3121 	switch (phy_type_high) {
3122 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3123 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3124 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3125 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3126 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3127 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3128 		break;
3129 	default:
3130 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3131 		break;
3132 	}
3133 
3134 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3135 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3136 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3137 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3138 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3139 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3140 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3141 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3142 		return speed_phy_type_low;
3143 	else
3144 		return speed_phy_type_high;
3145 }
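
/*
 * Worked example: phy_type_low = ICE_PHY_TYPE_LOW_25GBASE_SR with
 * phy_type_high = 0 resolves to ICE_AQ_LINK_SPEED_25GB; two bits set in one
 * word, or one bit set in each of the low and high words, resolves to
 * ICE_AQ_LINK_SPEED_UNKNOWN.
 */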
3146 
3147 /**
3148  * ice_update_phy_type
3149  * @phy_type_low: pointer to the lower part of phy_type
3150  * @phy_type_high: pointer to the higher part of phy_type
3151  * @link_speeds_bitmap: targeted link speeds bitmap
3152  *
3153  * Note: For the link_speeds_bitmap layout, see
3154  * [ice_aqc_get_link_status->link_speed]. The caller may pass a
3155  * link_speeds_bitmap that includes multiple speeds.
3156  *
3157  * Each bit in the [phy_type_low, phy_type_high] structure represents a
3158  * certain link speed. This helper function turns on the bits in
3159  * [phy_type_low, phy_type_high] that correspond to speeds set in the
3160  * link_speeds_bitmap input parameter.
3161  */
3162 void
3163 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3164 		    u16 link_speeds_bitmap)
3165 {
3166 	u64 pt_high;
3167 	u64 pt_low;
3168 	int index;
3169 	u16 speed;
3170 
3171 	/* We first check with low part of phy_type */
3172 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3173 		pt_low = BIT_ULL(index);
3174 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3175 
3176 		if (link_speeds_bitmap & speed)
3177 			*phy_type_low |= BIT_ULL(index);
3178 	}
3179 
3180 	/* We then check with high part of phy_type */
3181 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3182 		pt_high = BIT_ULL(index);
3183 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3184 
3185 		if (link_speeds_bitmap & speed)
3186 			*phy_type_high |= BIT_ULL(index);
3187 	}
3188 }
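
/*
 * Usage sketch: build the PHY type masks advertising 10G and 25G. The
 * resulting masks could then be plugged into a set-PHY-config request.
 */
static void
example_build_phy_type_masks(u64 *low, u64 *high)
{
	*low = 0;
	*high = 0;
	ice_update_phy_type(low, high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
}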
3189 
3190 /**
3191  * ice_aq_set_phy_cfg
3192  * @hw: pointer to the HW struct
3193  * @pi: port info structure of the interested logical port
3194  * @cfg: structure with PHY configuration data to be set
3195  * @cd: pointer to command details structure or NULL
3196  *
3197  * Set the various PHY configuration parameters supported on the Port.
3198  * One or more of the Set PHY config parameters may be ignored in an MFP
3199  * mode as the PF may not have the privilege to set some of the PHY Config
3200  * parameters. This status will be indicated by the command response (0x0601).
3201  */
3202 enum ice_status
3203 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3204 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3205 {
3206 	struct ice_aq_desc desc;
3207 	enum ice_status status;
3208 
3209 	if (!cfg)
3210 		return ICE_ERR_PARAM;
3211 
3212 	/* Ensure that only valid bits of cfg->caps can be turned on. */
3213 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3214 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3215 			  cfg->caps);
3216 
3217 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3218 	}
3219 
3220 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3221 	desc.params.set_phy.lport_num = pi->lport;
3222 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3223 
3224 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3225 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
3226 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3227 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
3228 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3229 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
3230 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
3231 		  cfg->low_power_ctrl_an);
3232 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
3233 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
3234 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
3235 		  cfg->link_fec_opt);
3236 
3237 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3238 
3239 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3240 		status = ICE_SUCCESS;
3241 
3242 	if (!status)
3243 		pi->phy.curr_user_phy_cfg = *cfg;
3244 
3245 	return status;
3246 }
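
/*
 * Illustrative sketch (not part of the driver): the usual calling pattern
 * is read-modify-write. Fetch the active configuration, adjust it, then
 * issue the Set PHY config command; pi is assumed to be a valid port and
 * the stack allocation is for brevity only:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 &pcaps, NULL)) {
 *		ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
 *		cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *		ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 *	}
 */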
3247 
3248 /**
3249  * ice_update_link_info - update status of the HW network link
3250  * @pi: port info structure of the interested logical port
3251  */
3252 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3253 {
3254 	struct ice_link_status *li;
3255 	enum ice_status status;
3256 
3257 	if (!pi)
3258 		return ICE_ERR_PARAM;
3259 
3260 	li = &pi->phy.link_info;
3261 
3262 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
3263 	if (status)
3264 		return status;
3265 
3266 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3267 		struct ice_aqc_get_phy_caps_data *pcaps;
3268 		struct ice_hw *hw;
3269 
3270 		hw = pi->hw;
3271 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3272 			ice_malloc(hw, sizeof(*pcaps));
3273 		if (!pcaps)
3274 			return ICE_ERR_NO_MEMORY;
3275 
3276 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3277 					     pcaps, NULL);
3278 
3279 		if (status == ICE_SUCCESS)
3280 			ice_memcpy(li->module_type, &pcaps->module_type,
3281 				   sizeof(li->module_type),
3282 				   ICE_NONDMA_TO_NONDMA);
3283 
3284 		ice_free(hw, pcaps);
3285 	}
3286 
3287 	return status;
3288 }
3289 
3290 /**
3291  * ice_cache_phy_user_req
3292  * @pi: port information structure
3293  * @cache_data: PHY logging data
3294  * @cache_mode: PHY logging mode
3295  *
3296  * Log the user request on (FC, FEC, SPEED) for later use.
3297  */
3298 static void
3299 ice_cache_phy_user_req(struct ice_port_info *pi,
3300 		       struct ice_phy_cache_mode_data cache_data,
3301 		       enum ice_phy_cache_mode cache_mode)
3302 {
3303 	if (!pi)
3304 		return;
3305 
3306 	switch (cache_mode) {
3307 	case ICE_FC_MODE:
3308 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3309 		break;
3310 	case ICE_SPEED_MODE:
3311 		pi->phy.curr_user_speed_req =
3312 			cache_data.data.curr_user_speed_req;
3313 		break;
3314 	case ICE_FEC_MODE:
3315 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3316 		break;
3317 	default:
3318 		break;
3319 	}
3320 }
3321 
3322 /**
3323  * ice_caps_to_fc_mode
3324  * @caps: PHY capabilities
3325  *
3326  * Convert PHY FC capabilities to ice FC mode
3327  */
3328 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3329 {
3330 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3331 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3332 		return ICE_FC_FULL;
3333 
3334 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3335 		return ICE_FC_TX_PAUSE;
3336 
3337 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3338 		return ICE_FC_RX_PAUSE;
3339 
3340 	return ICE_FC_NONE;
3341 }
3342 
3343 /**
3344  * ice_caps_to_fec_mode
3345  * @caps: PHY capabilities
3346  * @fec_options: Link FEC options
3347  *
3348  * Convert PHY FEC capabilities to ice FEC mode
3349  */
3350 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3351 {
3352 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3353 		return ICE_FEC_AUTO;
3354 
3355 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3356 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3357 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3358 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3359 		return ICE_FEC_BASER;
3360 
3361 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3362 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3363 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3364 		return ICE_FEC_RS;
3365 
3366 	return ICE_FEC_NONE;
3367 }
3368 
3369 /**
3370  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3371  * @pi: port information structure
3372  * @cfg: PHY configuration data to set FC mode
3373  * @req_mode: FC mode to configure
3374  */
3375 static enum ice_status
3376 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3377 	       enum ice_fc_mode req_mode)
3378 {
3379 	struct ice_phy_cache_mode_data cache_data;
3380 	u8 pause_mask = 0x0;
3381 
3382 	if (!pi || !cfg)
3383 		return ICE_ERR_BAD_PTR;
3384 	switch (req_mode) {
3385 	case ICE_FC_AUTO:
3386 	{
3387 		struct ice_aqc_get_phy_caps_data *pcaps;
3388 		enum ice_status status;
3389 
3390 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3391 			ice_malloc(pi->hw, sizeof(*pcaps));
3392 		if (!pcaps)
3393 			return ICE_ERR_NO_MEMORY;
3394 		/* Query the value of FC that both the NIC and attached media
3395 		 * can do.
3396 		 */
3397 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3398 					     pcaps, NULL);
3399 		if (status) {
3400 			ice_free(pi->hw, pcaps);
3401 			return status;
3402 		}
3403 
3404 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3405 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3406 
3407 		ice_free(pi->hw, pcaps);
3408 		break;
3409 	}
3410 	case ICE_FC_FULL:
3411 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3412 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3413 		break;
3414 	case ICE_FC_RX_PAUSE:
3415 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3416 		break;
3417 	case ICE_FC_TX_PAUSE:
3418 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3419 		break;
3420 	default:
3421 		break;
3422 	}
3423 
3424 	/* clear the old pause settings */
3425 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3426 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3427 
3428 	/* set the new capabilities */
3429 	cfg->caps |= pause_mask;
3430 
3431 	/* Cache user FC request */
3432 	cache_data.data.curr_user_fc_req = req_mode;
3433 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3434 
3435 	return ICE_SUCCESS;
3436 }
3437 
3438 /**
3439  * ice_set_fc
3440  * @pi: port information structure
3441  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3442  * @ena_auto_link_update: enable automatic link update
3443  *
3444  * Set the requested flow control mode.
3445  */
3446 enum ice_status
3447 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3448 {
3449 	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
3450 	struct ice_aqc_get_phy_caps_data *pcaps;
3451 	enum ice_status status;
3452 	struct ice_hw *hw;
3453 
3454 	if (!pi || !aq_failures)
3455 		return ICE_ERR_BAD_PTR;
3456 
3457 	*aq_failures = 0;
3458 	hw = pi->hw;
3459 
3460 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3461 		ice_malloc(hw, sizeof(*pcaps));
3462 	if (!pcaps)
3463 		return ICE_ERR_NO_MEMORY;
3464 
3465 	/* Get the current PHY config */
3466 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3467 				     pcaps, NULL);
3468 
3469 	if (status) {
3470 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3471 		goto out;
3472 	}
3473 
3474 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3475 
3476 	/* Configure the set PHY data */
3477 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3478 	if (status) {
3479 		if (status != ICE_ERR_BAD_PTR)
3480 			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3481 
3482 		goto out;
3483 	}
3484 
3485 	/* If the capabilities have changed, then set the new config */
3486 	if (cfg.caps != pcaps->caps) {
3487 		int retry_count, retry_max = 10;
3488 
3489 		/* Auto restart link so settings take effect */
3490 		if (ena_auto_link_update)
3491 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3492 
3493 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3494 		if (status) {
3495 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3496 			goto out;
3497 		}
3498 
3499 		/* Update the link info
3500 		 * It sometimes takes a really long time for link to
3501 		 * come back from the atomic reset. Thus, we wait a
3502 		 * little bit.
3503 		 */
3504 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3505 			status = ice_update_link_info(pi);
3506 
3507 			if (status == ICE_SUCCESS)
3508 				break;
3509 
3510 			ice_msec_delay(100, true);
3511 		}
3512 
3513 		if (status)
3514 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3515 	}
3516 
3517 out:
3518 	ice_free(hw, pcaps);
3519 	return status;
3520 }
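
/*
 * Illustrative sketch (not part of the driver): requesting symmetric
 * pause. The desired mode is staged in pi->fc.req_mode, and aq_failures
 * reports which of the get/set/update AQ stages failed:
 *
 *	u8 aq_failures = 0;
 *	enum ice_status status;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, stage %d\n",
 *			  aq_failures);
 */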
3521 
3522 /**
3523  * ice_phy_caps_equals_cfg
3524  * @phy_caps: PHY capabilities
3525  * @phy_cfg: PHY configuration
3526  *
3527  * Helper function to determine if the PHY capabilities match the PHY
3528  * configuration
3529  */
3530 bool
3531 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3532 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3533 {
3534 	u8 caps_mask, cfg_mask;
3535 
3536 	if (!phy_caps || !phy_cfg)
3537 		return false;
3538 
3539 	/* These bits are not common between capabilities and configuration.
3540 	 * Do not use them to determine equality.
3541 	 */
3542 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3543 					      ICE_AQC_PHY_EN_MOD_QUAL);
3544 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3545 
3546 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3547 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3548 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3549 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3550 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3551 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3552 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3553 		return false;
3554 
3555 	return true;
3556 }
3557 
3558 /**
3559  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3560  * @pi: port information structure
3561  * @caps: PHY ability structure to copy data from
3562  * @cfg: PHY configuration structure to copy data to
3563  *
3564  * Helper function to copy AQC PHY get ability data to PHY set configuration
3565  * data structure
3566  */
3567 void
3568 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3569 			 struct ice_aqc_get_phy_caps_data *caps,
3570 			 struct ice_aqc_set_phy_cfg_data *cfg)
3571 {
3572 	if (!pi || !caps || !cfg)
3573 		return;
3574 
3575 	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3576 	cfg->phy_type_low = caps->phy_type_low;
3577 	cfg->phy_type_high = caps->phy_type_high;
3578 	cfg->caps = caps->caps;
3579 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3580 	cfg->eee_cap = caps->eee_cap;
3581 	cfg->eeer_value = caps->eeer_value;
3582 	cfg->link_fec_opt = caps->link_fec_options;
3583 	cfg->module_compliance_enforcement =
3584 		caps->module_compliance_enforcement;
3585 }
3586 
3587 /**
3588  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3589  * @pi: port information structure
3590  * @cfg: PHY configuration data to set FEC mode
3591  * @fec: FEC mode to configure
3592  */
3593 enum ice_status
3594 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3595 		enum ice_fec_mode fec)
3596 {
3597 	struct ice_aqc_get_phy_caps_data *pcaps;
3598 	enum ice_status status = ICE_SUCCESS;
3599 	struct ice_hw *hw;
3600 
3601 	if (!pi || !cfg)
3602 		return ICE_ERR_BAD_PTR;
3603 
3604 	hw = pi->hw;
3605 
3606 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3607 		ice_malloc(hw, sizeof(*pcaps));
3608 	if (!pcaps)
3609 		return ICE_ERR_NO_MEMORY;
3610 
3611 	status = ice_aq_get_phy_caps(pi, false,
3612 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3613 				      ICE_AQC_REPORT_DFLT_CFG :
3614 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3615 
3616 	if (status)
3617 		goto out;
3618 
3619 	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3620 	cfg->link_fec_opt = pcaps->link_fec_options;
3621 
3622 	switch (fec) {
3623 	case ICE_FEC_BASER:
3624 		/* Clear the RS bits, AND in the BASE-R ability
3625 		 * bits, and OR in the request bits.
3626 		 */
3627 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3628 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3629 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3630 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3631 		break;
3632 	case ICE_FEC_RS:
3633 		/* Clear the BASE-R bits, AND in the RS ability
3634 		 * bits, and OR in the request bits.
3635 		 */
3636 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3637 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3638 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3639 		break;
3640 	case ICE_FEC_NONE:
3641 		/* Clear all FEC option bits. */
3642 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3643 		break;
3644 	case ICE_FEC_AUTO:
3645 		/* AND in the auto FEC bit and all caps bits. */
3646 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3647 		cfg->link_fec_opt |= pcaps->link_fec_options;
3648 		break;
3649 	default:
3650 		status = ICE_ERR_PARAM;
3651 		break;
3652 	}
3653 
3654 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3655 	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3656 		struct ice_link_default_override_tlv tlv;
3657 
3658 		if (ice_get_link_default_override(&tlv, pi))
3659 			goto out;
3660 
3661 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3662 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3663 			cfg->link_fec_opt = tlv.fec_options;
3664 	}
3665 
3666 out:
3667 	ice_free(hw, pcaps);
3668 
3669 	return status;
3670 }
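
/*
 * Illustrative sketch (not part of the driver): forcing RS-FEC follows the
 * same read-modify-write pattern as flow control; pi is assumed valid and
 * error handling is elided:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 &pcaps, NULL)) {
 *		ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
 *		if (!ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS))
 *			ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 *	}
 */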
3671 
3672 /**
3673  * ice_get_link_status - get status of the HW network link
3674  * @pi: port information structure
3675  * @link_up: pointer to bool (true/false = linkup/linkdown)
3676  *
3677  * Variable link_up is true if the link is up, false if it is down.
3678  * The value of link_up is invalid if the return status is non-zero. As
3679  * a result of this call, link status reporting becomes enabled.
3680  */
3681 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3682 {
3683 	struct ice_phy_info *phy_info;
3684 	enum ice_status status = ICE_SUCCESS;
3685 
3686 	if (!pi || !link_up)
3687 		return ICE_ERR_PARAM;
3688 
3689 	phy_info = &pi->phy;
3690 
3691 	if (phy_info->get_link_info) {
3692 		status = ice_update_link_info(pi);
3693 
3694 		if (status)
3695 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3696 				  status);
3697 	}
3698 
3699 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3700 
3701 	return status;
3702 }
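
/*
 * Illustrative sketch (not part of the driver): a minimal link poll. The
 * first call also enables link status reporting as noted above:
 *
 *	bool link_up = false;
 *
 *	if (ice_get_link_status(pi, &link_up) == ICE_SUCCESS && link_up)
 *		(link is up; proceed with bring-up)
 */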
3703 
3704 /**
3705  * ice_aq_set_link_restart_an
3706  * @pi: pointer to the port information structure
3707  * @ena_link: if true: enable link, if false: disable link
3708  * @cd: pointer to command details structure or NULL
3709  *
3710  * Sets up the link and restarts the Auto-Negotiation over the link.
3711  */
3712 enum ice_status
3713 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3714 			   struct ice_sq_cd *cd)
3715 {
3716 	struct ice_aqc_restart_an *cmd;
3717 	struct ice_aq_desc desc;
3718 
3719 	cmd = &desc.params.restart_an;
3720 
3721 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3722 
3723 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3724 	cmd->lport_num = pi->lport;
3725 	if (ena_link)
3726 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3727 	else
3728 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3729 
3730 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3731 }
3732 
3733 /**
3734  * ice_aq_set_event_mask
3735  * @hw: pointer to the HW struct
3736  * @port_num: port number of the physical function
3737  * @mask: event mask to be set
3738  * @cd: pointer to command details structure or NULL
3739  *
3740  * Set event mask (0x0613)
3741  */
3742 enum ice_status
3743 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3744 		      struct ice_sq_cd *cd)
3745 {
3746 	struct ice_aqc_set_event_mask *cmd;
3747 	struct ice_aq_desc desc;
3748 
3749 	cmd = &desc.params.set_event_mask;
3750 
3751 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3752 
3753 	cmd->lport_num = port_num;
3754 
3755 	cmd->event_mask = CPU_TO_LE16(mask);
3756 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3757 }
3758 
3759 /**
3760  * ice_aq_set_mac_loopback
3761  * @hw: pointer to the HW struct
3762  * @ena_lpbk: Enable or Disable loopback
3763  * @cd: pointer to command details structure or NULL
3764  *
3765  * Enable/disable loopback on a given port
3766  */
3767 enum ice_status
3768 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3769 {
3770 	struct ice_aqc_set_mac_lb *cmd;
3771 	struct ice_aq_desc desc;
3772 
3773 	cmd = &desc.params.set_mac_lb;
3774 
3775 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3776 	if (ena_lpbk)
3777 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3778 
3779 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3780 }
3781 
3782 /**
3783  * ice_aq_set_port_id_led
3784  * @pi: pointer to the port information
3785  * @is_orig_mode: is this LED set to original mode (by the net-list)
3786  * @cd: pointer to command details structure or NULL
3787  *
3788  * Set LED value for the given port (0x06e9)
3789  */
3790 enum ice_status
3791 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3792 		       struct ice_sq_cd *cd)
3793 {
3794 	struct ice_aqc_set_port_id_led *cmd;
3795 	struct ice_hw *hw = pi->hw;
3796 	struct ice_aq_desc desc;
3797 
3798 	cmd = &desc.params.set_port_id_led;
3799 
3800 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3801 
3802 	if (is_orig_mode)
3803 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3804 	else
3805 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3806 
3807 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3808 }
3809 
3810 /**
3811  * ice_aq_sff_eeprom
3812  * @hw: pointer to the HW struct
3813  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3814  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3815  * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits are zero padding.
3816  * @page: QSFP page
3817  * @set_page: set or ignore the page
3818  * @data: pointer to data buffer to be read/written to the I2C device.
3819  * @length: 1-16 for read, 1 for write.
3820  * @write: false for read, true for write.
3821  * @cd: pointer to command details structure or NULL
3822  *
3823  * Read/Write SFF EEPROM (0x06EE)
3824  */
3825 enum ice_status
3826 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3827 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3828 		  bool write, struct ice_sq_cd *cd)
3829 {
3830 	struct ice_aqc_sff_eeprom *cmd;
3831 	struct ice_aq_desc desc;
3832 	enum ice_status status;
3833 
3834 	if (!data || (mem_addr & 0xff00))
3835 		return ICE_ERR_PARAM;
3836 
3837 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3838 	cmd = &desc.params.read_write_sff_param;
3839 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3840 	cmd->lport_num = (u8)(lport & 0xff);
3841 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3842 	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3843 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3844 					((set_page <<
3845 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3846 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3847 	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3848 	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3849 	if (write)
3850 		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3851 
3852 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3853 	return status;
3854 }
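
/*
 * Illustrative sketch (not part of the driver): reading the first 16
 * bytes of page 0 from a module at the conventional 0xA0 address. Bit 8
 * of lport marks the port number as valid; hw and pi are assumed to be
 * set up by the caller:
 *
 *	u8 id[16];
 *	u16 lport = (u16)pi->lport | BIT(8);
 *	enum ice_status status;
 *
 *	status = ice_aq_sff_eeprom(hw, lport, 0xA0, 0, 0, 0, id,
 *				   sizeof(id), false, NULL);
 */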
3855 
3856 /**
3857  * ice_aq_prog_topo_dev_nvm
3858  * @hw: pointer to the hardware structure
3859  * @topo_params: pointer to structure storing topology parameters for a device
3860  * @cd: pointer to command details structure or NULL
3861  *
3862  * Program Topology Device NVM (0x06F2)
3863  */
3865 enum ice_status
3866 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3867 			 struct ice_aqc_link_topo_params *topo_params,
3868 			 struct ice_sq_cd *cd)
3869 {
3870 	struct ice_aqc_prog_topo_dev_nvm *cmd;
3871 	struct ice_aq_desc desc;
3872 
3873 	cmd = &desc.params.prog_topo_dev_nvm;
3874 
3875 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
3876 
3877 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3878 		   ICE_NONDMA_TO_NONDMA);
3879 
3880 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3881 }
3882 
3883 /**
3884  * ice_aq_read_topo_dev_nvm
3885  * @hw: pointer to the hardware structure
3886  * @topo_params: pointer to structure storing topology parameters for a device
3887  * @start_address: byte offset in the topology device NVM
3888  * @data: pointer to data buffer
3889  * @data_size: number of bytes to be read from the topology device NVM
3890  * @cd: pointer to command details structure or NULL
3891  *
3892  * Read Topology Device NVM (0x06F3)
3893  */
3894 enum ice_status
3895 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
3896 			 struct ice_aqc_link_topo_params *topo_params,
3897 			 u32 start_address, u8 *data, u8 data_size,
3898 			 struct ice_sq_cd *cd)
3899 {
3900 	struct ice_aqc_read_topo_dev_nvm *cmd;
3901 	struct ice_aq_desc desc;
3902 	enum ice_status status;
3903 
3904 	if (!data || data_size == 0 ||
3905 	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
3906 		return ICE_ERR_PARAM;
3907 
3908 	cmd = &desc.params.read_topo_dev_nvm;
3909 
3910 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
3911 
3912 	desc.datalen = CPU_TO_LE16(data_size);
3913 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3914 		   ICE_NONDMA_TO_NONDMA);
3915 	cmd->start_address = CPU_TO_LE32(start_address);
3916 
3917 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3918 	if (status)
3919 		return status;
3920 
3921 	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
3922 
3923 	return ICE_SUCCESS;
3924 }
3925 
3926 /**
3927  * __ice_aq_get_set_rss_lut
3928  * @hw: pointer to the hardware structure
3929  * @params: RSS LUT parameters
3930  * @set: set true to set the table, false to get the table
3931  *
3932  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3933  */
3934 static enum ice_status
3935 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3936 {
3937 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3938 	struct ice_aqc_get_set_rss_lut *cmd_resp;
3939 	struct ice_aq_desc desc;
3940 	enum ice_status status;
3941 	u8 *lut;
3942 
3943 	if (!params)
3944 		return ICE_ERR_PARAM;
3945 
3946 	vsi_handle = params->vsi_handle;
3947 	lut = params->lut;
3948 
3949 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3950 		return ICE_ERR_PARAM;
3951 
3952 	lut_size = params->lut_size;
3953 	lut_type = params->lut_type;
3954 	glob_lut_idx = params->global_lut_id;
3955 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3956 
3957 	cmd_resp = &desc.params.get_set_rss_lut;
3958 
3959 	if (set) {
3960 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3961 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3962 	} else {
3963 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3964 	}
3965 
3966 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3967 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3968 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3969 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3970 
3971 	switch (lut_type) {
3972 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3973 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3974 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3975 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3976 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3977 		break;
3978 	default:
3979 		status = ICE_ERR_PARAM;
3980 		goto ice_aq_get_set_rss_lut_exit;
3981 	}
3982 
3983 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3984 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3985 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3986 
3987 		if (!set)
3988 			goto ice_aq_get_set_rss_lut_send;
3989 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3990 		if (!set)
3991 			goto ice_aq_get_set_rss_lut_send;
3992 	} else {
3993 		goto ice_aq_get_set_rss_lut_send;
3994 	}
3995 
3996 	/* LUT size is only valid for Global and PF table types */
3997 	switch (lut_size) {
3998 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3999 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
4000 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4001 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4002 		break;
4003 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
4004 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
4005 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4006 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4007 		break;
4008 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
4009 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4010 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
4011 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4012 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4013 			break;
4014 		}
4015 		/* fall-through */
4016 	default:
4017 		status = ICE_ERR_PARAM;
4018 		goto ice_aq_get_set_rss_lut_exit;
4019 	}
4020 
4021 ice_aq_get_set_rss_lut_send:
4022 	cmd_resp->flags = CPU_TO_LE16(flags);
4023 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4024 
4025 ice_aq_get_set_rss_lut_exit:
4026 	return status;
4027 }
4028 
4029 /**
4030  * ice_aq_get_rss_lut
4031  * @hw: pointer to the hardware structure
4032  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4033  *
4034  * get the RSS lookup table, PF or VSI type
4035  */
4036 enum ice_status
4037 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4038 {
4039 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
4040 }
4041 
4042 /**
4043  * ice_aq_set_rss_lut
4044  * @hw: pointer to the hardware structure
4045  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4046  *
4047  * set the RSS lookup table, PF or VSI type
4048  */
4049 enum ice_status
4050 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4051 {
4052 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
4053 }
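
/*
 * Illustrative sketch (not part of the driver): programming a 512-entry
 * PF LUT. The params structure carries the software VSI handle; the
 * helper translates it to the hardware VSI number internally. lut_buf is
 * assumed to be a caller-provided array of 512 bytes:
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut = lut_buf;
 *	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	status = ice_aq_set_rss_lut(hw, &params);
 */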
4054 
4055 /**
4056  * __ice_aq_get_set_rss_key
4057  * @hw: pointer to the HW struct
4058  * @vsi_id: VSI FW index
4059  * @key: pointer to key info struct
4060  * @set: set true to set the key, false to get the key
4061  *
4062  * get (0x0B04) or set (0x0B02) the RSS key per VSI
4063  */
4064 static enum ice_status
4065 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4066 			 struct ice_aqc_get_set_rss_keys *key,
4067 			 bool set)
4068 {
4069 	struct ice_aqc_get_set_rss_key *cmd_resp;
4070 	u16 key_size = sizeof(*key);
4071 	struct ice_aq_desc desc;
4072 
4073 	cmd_resp = &desc.params.get_set_rss_key;
4074 
4075 	if (set) {
4076 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4077 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4078 	} else {
4079 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4080 	}
4081 
4082 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4083 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4084 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4085 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4086 
4087 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4088 }
4089 
4090 /**
4091  * ice_aq_get_rss_key
4092  * @hw: pointer to the HW struct
4093  * @vsi_handle: software VSI handle
4094  * @key: pointer to key info struct
4095  *
4096  * get the RSS key per VSI
4097  */
4098 enum ice_status
4099 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4100 		   struct ice_aqc_get_set_rss_keys *key)
4101 {
4102 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4103 		return ICE_ERR_PARAM;
4104 
4105 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4106 					key, false);
4107 }
4108 
4109 /**
4110  * ice_aq_set_rss_key
4111  * @hw: pointer to the HW struct
4112  * @vsi_handle: software VSI handle
4113  * @keys: pointer to key info struct
4114  *
4115  * set the RSS key per VSI
4116  */
4117 enum ice_status
4118 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4119 		   struct ice_aqc_get_set_rss_keys *keys)
4120 {
4121 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4122 		return ICE_ERR_PARAM;
4123 
4124 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4125 					keys, true);
4126 }
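
/*
 * Illustrative sketch (not part of the driver): setting the RSS hash key
 * for a VSI. The standard_rss_key field name is an assumption about the
 * key structure layout; the caller fills it with random bytes:
 *
 *	struct ice_aqc_get_set_rss_keys keys;
 *
 *	ice_memset(&keys, 0, sizeof(keys), ICE_NONDMA_MEM);
 *	(fill keys.standard_rss_key with random data)
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */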
4127 
4128 /**
4129  * ice_aq_add_lan_txq
4130  * @hw: pointer to the hardware structure
4131  * @num_qgrps: Number of added queue groups
4132  * @qg_list: list of queue groups to be added
4133  * @buf_size: size of buffer for indirect command
4134  * @cd: pointer to command details structure or NULL
4135  *
4136  * Add Tx LAN queue (0x0C30)
4137  *
4138  * NOTE:
4139  * Prior to calling add Tx LAN queue:
4140  * Initialize the following as part of the Tx queue context:
4141  * Completion queue ID if the queue uses Completion queue, Quanta profile,
4142  * Cache profile and Packet shaper profile.
4143  *
4144  * After add Tx LAN queue AQ command is completed:
4145  * Interrupts should be associated with specific queues,
4146  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4147  * flow.
4148  */
4149 enum ice_status
4150 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4151 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4152 		   struct ice_sq_cd *cd)
4153 {
4154 	struct ice_aqc_add_tx_qgrp *list;
4155 	struct ice_aqc_add_txqs *cmd;
4156 	struct ice_aq_desc desc;
4157 	u16 i, sum_size = 0;
4158 
4159 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4160 
4161 	cmd = &desc.params.add_txqs;
4162 
4163 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4164 
4165 	if (!qg_list)
4166 		return ICE_ERR_PARAM;
4167 
4168 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4169 		return ICE_ERR_PARAM;
4170 
4171 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
4172 		sum_size += ice_struct_size(list, txqs, list->num_txqs);
4173 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4174 						      list->num_txqs);
4175 	}
4176 
4177 	if (buf_size != sum_size)
4178 		return ICE_ERR_PARAM;
4179 
4180 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4181 
4182 	cmd->num_qgrps = num_qgrps;
4183 
4184 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4185 }
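
/*
 * Illustrative sketch (not part of the driver): because buf_size must
 * equal the summed group sizes exactly, a caller adding a single group
 * with one queue sizes the buffer with ice_struct_size:
 *
 *	struct ice_aqc_add_tx_qgrp *qg;
 *	u16 buf_size = ice_struct_size(qg, txqs, 1);
 *
 *	qg = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, buf_size);
 *	if (qg) {
 *		qg->num_txqs = 1;
 *		(fill qg->txqs[0] and parent_teid, then...)
 *		status = ice_aq_add_lan_txq(hw, 1, qg, buf_size, NULL);
 *		ice_free(hw, qg);
 *	}
 */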
4186 
4187 /**
4188  * ice_aq_dis_lan_txq
4189  * @hw: pointer to the hardware structure
4190  * @num_qgrps: number of groups in the list
4191  * @qg_list: the list of groups to disable
4192  * @buf_size: the total size of the qg_list buffer in bytes
4193  * @rst_src: if called due to reset, specifies the reset source
4194  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4195  * @cd: pointer to command details structure or NULL
4196  *
4197  * Disable LAN Tx queue (0x0C31)
4198  */
4199 static enum ice_status
4200 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4201 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4202 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
4203 		   struct ice_sq_cd *cd)
4204 {
4205 	struct ice_aqc_dis_txq_item *item;
4206 	struct ice_aqc_dis_txqs *cmd;
4207 	struct ice_aq_desc desc;
4208 	enum ice_status status;
4209 	u16 i, sz = 0;
4210 
4211 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4212 	cmd = &desc.params.dis_txqs;
4213 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4214 
4215 	/* qg_list can be NULL only in VM/VF reset flow */
4216 	if (!qg_list && !rst_src)
4217 		return ICE_ERR_PARAM;
4218 
4219 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4220 		return ICE_ERR_PARAM;
4221 
4222 	cmd->num_entries = num_qgrps;
4223 
4224 	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4225 					    ICE_AQC_Q_DIS_TIMEOUT_M);
4226 
4227 	switch (rst_src) {
4228 	case ICE_VM_RESET:
4229 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4230 		cmd->vmvf_and_timeout |=
4231 			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4232 		break;
4233 	case ICE_VF_RESET:
4234 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4235 		/* In this case, FW expects vmvf_num to be absolute VF ID */
4236 		cmd->vmvf_and_timeout |=
4237 			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4238 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
4239 		break;
4240 	case ICE_NO_RESET:
4241 	default:
4242 		break;
4243 	}
4244 
4245 	/* flush pipe on time out */
4246 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4247 	/* If no queue group info, we are in a reset flow. Issue the AQ */
4248 	if (!qg_list)
4249 		goto do_aq;
4250 
4251 	/* set RD bit to indicate that command buffer is provided by the driver
4252 	 * and it needs to be read by the firmware
4253 	 */
4254 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4255 
4256 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
4257 		u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4258 
4259 		/* If the num of queues is even, add 2 bytes of padding */
4260 		if ((item->num_qs % 2) == 0)
4261 			item_size += 2;
4262 
4263 		sz += item_size;
4264 
4265 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4266 	}
4267 
4268 	if (buf_size != sz)
4269 		return ICE_ERR_PARAM;
4270 
4271 do_aq:
4272 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4273 	if (status) {
4274 		if (!qg_list)
4275 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4276 				  vmvf_num, hw->adminq.sq_last_status);
4277 		else
4278 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4279 				  LE16_TO_CPU(qg_list[0].q_id[0]),
4280 				  hw->adminq.sq_last_status);
4281 	}
4282 	return status;
4283 }
4284 
4285 /**
4286  * ice_aq_move_recfg_lan_txq
4287  * @hw: pointer to the hardware structure
4288  * @num_qs: number of queues to move/reconfigure
4289  * @is_move: true if this operation involves node movement
4290  * @is_tc_change: true if this operation involves a TC change
4291  * @subseq_call: true if this operation is a subsequent call
4292  * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4293  * @timeout: timeout in units of 100 usec (valid values 0-50)
4294  * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4295  * @buf: struct containing src/dest TEID and per-queue info
4296  * @buf_size: size of buffer for indirect command
4297  * @txqs_moved: out param, number of queues successfully moved
4298  * @cd: pointer to command details structure or NULL
4299  *
4300  * Move / Reconfigure Tx LAN queues (0x0C32)
4301  */
4302 enum ice_status
4303 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4304 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
4305 			  u8 timeout, u32 *blocked_cgds,
4306 			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4307 			  u8 *txqs_moved, struct ice_sq_cd *cd)
4308 {
4309 	struct ice_aqc_move_txqs *cmd;
4310 	struct ice_aq_desc desc;
4311 	enum ice_status status;
4312 
4313 	cmd = &desc.params.move_txqs;
4314 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4315 
4316 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4317 	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4318 		return ICE_ERR_PARAM;
4319 
4320 	if (is_tc_change && !flush_pipe && !blocked_cgds)
4321 		return ICE_ERR_PARAM;
4322 
4323 	if (!is_move && !is_tc_change)
4324 		return ICE_ERR_PARAM;
4325 
4326 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4327 
4328 	if (is_move)
4329 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4330 
4331 	if (is_tc_change)
4332 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4333 
4334 	if (subseq_call)
4335 		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4336 
4337 	if (flush_pipe)
4338 		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4339 
4340 	cmd->num_qs = num_qs;
4341 	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4342 			ICE_AQC_Q_CMD_TIMEOUT_M);
4343 
4344 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4345 
4346 	if (!status && txqs_moved)
4347 		*txqs_moved = cmd->num_qs;
4348 
4349 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4350 	    is_tc_change && !flush_pipe)
4351 		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4352 
4353 	return status;
4354 }
4355 
4356 /**
4357  * ice_aq_add_rdma_qsets
4358  * @hw: pointer to the hardware structure
4359  * @num_qset_grps: Number of RDMA Qset groups
4360  * @qset_list: list of qset groups to be added
4361  * @buf_size: size of buffer for indirect command
4362  * @cd: pointer to command details structure or NULL
4363  *
4364  * Add Tx RDMA Qsets (0x0C33)
4365  */
4366 enum ice_status
4367 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4368 		      struct ice_aqc_add_rdma_qset_data *qset_list,
4369 		      u16 buf_size, struct ice_sq_cd *cd)
4370 {
4371 	struct ice_aqc_add_rdma_qset_data *list;
4372 	struct ice_aqc_add_rdma_qset *cmd;
4373 	struct ice_aq_desc desc;
4374 	u16 i, sum_size = 0;
4375 
4376 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4377 
4378 	cmd = &desc.params.add_rdma_qset;
4379 
4380 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4381 
4382 	if (!qset_list)
4383 		return ICE_ERR_PARAM;
4384 
4385 	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4386 		return ICE_ERR_PARAM;
4387 
4388 	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4389 		u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4390 
4391 		sum_size += ice_struct_size(list, rdma_qsets, num_qsets);
4392 		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4393 							     num_qsets);
4394 	}
4395 
4396 	if (buf_size != sum_size)
4397 		return ICE_ERR_PARAM;
4398 
4399 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4400 
4401 	cmd->num_qset_grps = num_qset_grps;
4402 
4403 	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4404 }
4405 
4406 /* End of FW Admin Queue command wrappers */
4407 
4408 /**
4409  * ice_write_byte - write a byte to a packed context structure
4410  * @src_ctx:  the context structure to read from
4411  * @dest_ctx: the context to be written to
4412  * @ce_info:  a description of the struct to be filled
4413  */
4414 static void
4415 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4416 {
4417 	u8 src_byte, dest_byte, mask;
4418 	u8 *from, *dest;
4419 	u16 shift_width;
4420 
4421 	/* copy from the next struct field */
4422 	from = src_ctx + ce_info->offset;
4423 
4424 	/* prepare the bits and mask */
4425 	shift_width = ce_info->lsb % 8;
4426 	mask = (u8)(BIT(ce_info->width) - 1);
4427 
4428 	src_byte = *from;
4429 	src_byte &= mask;
4430 
4431 	/* shift to correct alignment */
4432 	mask <<= shift_width;
4433 	src_byte <<= shift_width;
4434 
4435 	/* get the current bits from the target bit string */
4436 	dest = dest_ctx + (ce_info->lsb / 8);
4437 
4438 	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4439 
4440 	dest_byte &= ~mask;	/* get the bits not changing */
4441 	dest_byte |= src_byte;	/* add in the new bits */
4442 
4443 	/* put it all back */
4444 	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4445 }
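
/*
 * Worked example for the shift/mask arithmetic above (illustrative): for a
 * field with width = 3 and lsb = 13, shift_width = 13 % 8 = 5 and
 * mask = BIT(3) - 1 = 0x07. After alignment the mask becomes 0xE0, so the
 * source bits land in bits 7:5 of byte 13 / 8 = 1 of the packed buffer.
 */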
4446 
4447 /**
4448  * ice_write_word - write a word to a packed context structure
4449  * @src_ctx:  the context structure to read from
4450  * @dest_ctx: the context to be written to
4451  * @ce_info:  a description of the struct to be filled
4452  */
4453 static void
4454 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4455 {
4456 	u16 src_word, mask;
4457 	__le16 dest_word;
4458 	u8 *from, *dest;
4459 	u16 shift_width;
4460 
4461 	/* copy from the next struct field */
4462 	from = src_ctx + ce_info->offset;
4463 
4464 	/* prepare the bits and mask */
4465 	shift_width = ce_info->lsb % 8;
4466 	mask = BIT(ce_info->width) - 1;
4467 
4468 	/* don't swizzle the bits until after the mask because the mask bits
4469 	 * will be in a different bit position on big endian machines
4470 	 */
4471 	src_word = *(u16 *)from;
4472 	src_word &= mask;
4473 
4474 	/* shift to correct alignment */
4475 	mask <<= shift_width;
4476 	src_word <<= shift_width;
4477 
4478 	/* get the current bits from the target bit string */
4479 	dest = dest_ctx + (ce_info->lsb / 8);
4480 
4481 	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4482 
4483 	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
4484 	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
4485 
4486 	/* put it all back */
4487 	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4488 }
4489 
4490 /**
4491  * ice_write_dword - write a dword to a packed context structure
4492  * @src_ctx:  the context structure to read from
4493  * @dest_ctx: the context to be written to
4494  * @ce_info:  a description of the struct to be filled
4495  */
4496 static void
4497 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4498 {
4499 	u32 src_dword, mask;
4500 	__le32 dest_dword;
4501 	u8 *from, *dest;
4502 	u16 shift_width;
4503 
4504 	/* copy from the next struct field */
4505 	from = src_ctx + ce_info->offset;
4506 
4507 	/* prepare the bits and mask */
4508 	shift_width = ce_info->lsb % 8;
4509 
4510 	/* if the field width is exactly 32 on an x86 machine, then the shift
4511 	 * operation will not work because the SHL instruction count is masked
4512 	 * to 5 bits so the shift will do nothing
4513 	 */
4514 	if (ce_info->width < 32)
4515 		mask = BIT(ce_info->width) - 1;
4516 	else
4517 		mask = (u32)~0;
4518 
4519 	/* don't swizzle the bits until after the mask because the mask bits
4520 	 * will be in a different bit position on big endian machines
4521 	 */
4522 	src_dword = *(u32 *)from;
4523 	src_dword &= mask;
4524 
4525 	/* shift to correct alignment */
4526 	mask <<= shift_width;
4527 	src_dword <<= shift_width;
4528 
4529 	/* get the current bits from the target bit string */
4530 	dest = dest_ctx + (ce_info->lsb / 8);
4531 
4532 	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4533 
4534 	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
4535 	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
4536 
4537 	/* put it all back */
4538 	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4539 }
4540 
4541 /**
4542  * ice_write_qword - write a qword to a packed context structure
4543  * @src_ctx:  the context structure to read from
4544  * @dest_ctx: the context to be written to
4545  * @ce_info:  a description of the struct to be filled
4546  */
4547 static void
4548 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4549 {
4550 	u64 src_qword, mask;
4551 	__le64 dest_qword;
4552 	u8 *from, *dest;
4553 	u16 shift_width;
4554 
4555 	/* copy from the next struct field */
4556 	from = src_ctx + ce_info->offset;
4557 
4558 	/* prepare the bits and mask */
4559 	shift_width = ce_info->lsb % 8;
4560 
4561 	/* if the field width is exactly 64 on an x86 machine, then the shift
4562 	 * operation will not work because the SHL instruction count is masked
4563 	 * to 6 bits so the shift will do nothing
4564 	 */
4565 	if (ce_info->width < 64)
4566 		mask = BIT_ULL(ce_info->width) - 1;
4567 	else
4568 		mask = (u64)~0;
4569 
4570 	/* don't swizzle the bits until after the mask because the mask bits
4571 	 * will be in a different bit position on big endian machines
4572 	 */
4573 	src_qword = *(u64 *)from;
4574 	src_qword &= mask;
4575 
4576 	/* shift to correct alignment */
4577 	mask <<= shift_width;
4578 	src_qword <<= shift_width;
4579 
4580 	/* get the current bits from the target bit string */
4581 	dest = dest_ctx + (ce_info->lsb / 8);
4582 
4583 	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4584 
4585 	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
4586 	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
4587 
4588 	/* put it all back */
4589 	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4590 }
4591 
4592 /**
4593  * ice_set_ctx - set context bits in packed structure
4594  * @hw: pointer to the hardware structure
4595  * @src_ctx:  pointer to a generic non-packed context structure
4596  * @dest_ctx: pointer to memory for the packed structure
4597  * @ce_info:  a description of the structure to be transformed
4598  */
4599 enum ice_status
4600 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4601 	    const struct ice_ctx_ele *ce_info)
4602 {
4603 	int f;
4604 
4605 	for (f = 0; ce_info[f].width; f++) {
4606 		/* We have to deal with each element of the FW response
4607 		 * using the correct size so that we are correct regardless
4608 		 * of the endianness of the machine.
4609 		 */
4610 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4611 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4612 				  f, ce_info[f].width, ce_info[f].size_of);
4613 			continue;
4614 		}
4615 		switch (ce_info[f].size_of) {
4616 		case sizeof(u8):
4617 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4618 			break;
4619 		case sizeof(u16):
4620 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4621 			break;
4622 		case sizeof(u32):
4623 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4624 			break;
4625 		case sizeof(u64):
4626 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4627 			break;
4628 		default:
4629 			return ICE_ERR_INVAL_SIZE;
4630 		}
4631 	}
4632 
4633 	return ICE_SUCCESS;
4634 }
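
/*
 * Illustrative sketch (not part of the driver): ice_set_ctx() is driven by
 * an ice_ctx_ele array that describes every field of the packed layout; a
 * zero-width entry terminates the walk. A hypothetical two-field context
 * could be packed as follows:
 *
 *	struct my_ctx {
 *		u16 head;
 *		u8 ena;
 *	};
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, head),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 }
 *	};
 *
 *	struct my_ctx ctx = { .head = 5, .ena = 1 };
 *
 *	ice_set_ctx(hw, (u8 *)&ctx, dest_buf, my_ctx_info);
 */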
4635 
4636 /**
4637  * ice_aq_get_internal_data
4638  * @hw: pointer to the hardware structure
4639  * @cluster_id: specific cluster to dump
4640  * @table_id: table ID within cluster
4641  * @start: index of line in the block to read
4642  * @buf: dump buffer
4643  * @buf_size: dump buffer size
4644  * @ret_buf_size: return buffer size (returned by FW)
4645  * @ret_next_table: next block to read (returned by FW)
4646  * @ret_next_index: next index to read (returned by FW)
4647  * @cd: pointer to command details structure
4648  *
4649  * Get internal FW/HW data (0xFF08) for debug purposes.
4650  */
4651 enum ice_status
4652 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
4653 			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
4654 			 u16 *ret_next_table, u32 *ret_next_index,
4655 			 struct ice_sq_cd *cd)
4656 {
4657 	struct ice_aqc_debug_dump_internals *cmd;
4658 	struct ice_aq_desc desc;
4659 	enum ice_status status;
4660 
4661 	cmd = &desc.params.debug_dump;
4662 
4663 	if (buf_size == 0 || !buf)
4664 		return ICE_ERR_PARAM;
4665 
4666 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
4667 
4668 	cmd->cluster_id = cluster_id;
4669 	cmd->table_id = CPU_TO_LE16(table_id);
4670 	cmd->idx = CPU_TO_LE32(start);
4671 
4672 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4673 
4674 	if (!status) {
4675 		if (ret_buf_size)
4676 			*ret_buf_size = LE16_TO_CPU(desc.datalen);
4677 		if (ret_next_table)
4678 			*ret_next_table = LE16_TO_CPU(cmd->table_id);
4679 		if (ret_next_index)
4680 			*ret_next_index = LE32_TO_CPU(cmd->idx);
4681 	}
4682 
4683 	return status;
4684 }
4685 
4686 /**
4687  * ice_read_byte - read context byte into struct
4688  * @src_ctx:  the context structure to read from
4689  * @dest_ctx: the context to be written to
4690  * @ce_info:  a description of the struct to be filled
4691  */
4692 static void
4693 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4694 {
4695 	u8 dest_byte, mask;
4696 	u8 *src, *target;
4697 	u16 shift_width;
4698 
4699 	/* prepare the bits and mask */
4700 	shift_width = ce_info->lsb % 8;
4701 	mask = (u8)(BIT(ce_info->width) - 1);
4702 
4703 	/* shift to correct alignment */
4704 	mask <<= shift_width;
4705 
4706 	/* get the current bits from the src bit string */
4707 	src = src_ctx + (ce_info->lsb / 8);
4708 
4709 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4710 
4711 	dest_byte &= ~(mask);
4712 
4713 	dest_byte >>= shift_width;
4714 
4715 	/* get the address from the struct field */
4716 	target = dest_ctx + ce_info->offset;
4717 
4718 	/* put it back in the struct */
4719 	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4720 }
4721 
4722 /**
4723  * ice_read_word - read context word into struct
4724  * @src_ctx:  the context structure to read from
4725  * @dest_ctx: the context to be written to
4726  * @ce_info:  a description of the struct to be filled
4727  */
4728 static void
4729 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4730 {
4731 	u16 dest_word, mask;
4732 	u8 *src, *target;
4733 	__le16 src_word;
4734 	u16 shift_width;
4735 
4736 	/* prepare the bits and mask */
4737 	shift_width = ce_info->lsb % 8;
4738 	mask = BIT(ce_info->width) - 1;
4739 
4740 	/* shift to correct alignment */
4741 	mask <<= shift_width;
4742 
4743 	/* get the current bits from the src bit string */
4744 	src = src_ctx + (ce_info->lsb / 8);
4745 
4746 	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4747 
4748 	/* the data in the memory is stored as little endian so mask it
4749 	 * correctly
4750 	 */
4751 	src_word &= ~(CPU_TO_LE16(mask));
4752 
4753 	/* get the data back into host order before shifting */
4754 	dest_word = LE16_TO_CPU(src_word);
4755 
4756 	dest_word >>= shift_width;
4757 
4758 	/* get the address from the struct field */
4759 	target = dest_ctx + ce_info->offset;
4760 
4761 	/* put it back in the struct */
4762 	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4763 }
4764 
4765 /**
4766  * ice_read_dword - read context dword into struct
4767  * @src_ctx:  the context structure to read from
4768  * @dest_ctx: the context to be written to
4769  * @ce_info:  a description of the struct to be filled
4770  */
4771 static void
4772 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4773 {
4774 	u32 dest_dword, mask;
4775 	__le32 src_dword;
4776 	u8 *src, *target;
4777 	u16 shift_width;
4778 
4779 	/* prepare the bits and mask */
4780 	shift_width = ce_info->lsb % 8;
4781 
4782 	/* if the field width is exactly 32 on an x86 machine, then the shift
4783 	 * operation will not work because the SHL instruction count is masked
4784 	 * to 5 bits so the shift will do nothing
4785 	 */
4786 	if (ce_info->width < 32)
4787 		mask = BIT(ce_info->width) - 1;
4788 	else
4789 		mask = (u32)~0;
4790 
4791 	/* shift to correct alignment */
4792 	mask <<= shift_width;
4793 
4794 	/* get the current bits from the src bit string */
4795 	src = src_ctx + (ce_info->lsb / 8);
4796 
4797 	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4798 
4799 	/* the data in the memory is stored as little endian so mask it
4800 	 * correctly
4801 	 */
4802 	src_dword &= ~(CPU_TO_LE32(mask));
4803 
4804 	/* get the data back into host order before shifting */
4805 	dest_dword = LE32_TO_CPU(src_dword);
4806 
4807 	dest_dword >>= shift_width;
4808 
4809 	/* get the address from the struct field */
4810 	target = dest_ctx + ce_info->offset;
4811 
4812 	/* put it back in the struct */
4813 	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4814 }
4815 
4816 /**
4817  * ice_read_qword - read context qword into struct
4818  * @src_ctx:  the context structure to read from
4819  * @dest_ctx: the context to be written to
4820  * @ce_info:  a description of the struct to be filled
4821  */
4822 static void
4823 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4824 {
4825 	u64 dest_qword, mask;
4826 	__le64 src_qword;
4827 	u8 *src, *target;
4828 	u16 shift_width;
4829 
4830 	/* prepare the bits and mask */
4831 	shift_width = ce_info->lsb % 8;
4832 
4833 	/* if the field width is exactly 64 on an x86 machine, then the shift
4834 	 * operation will not work because the SHL instruction count is masked
4835 	 * to 6 bits so the shift will do nothing
4836 	 */
4837 	if (ce_info->width < 64)
4838 		mask = BIT_ULL(ce_info->width) - 1;
4839 	else
4840 		mask = (u64)~0;
4841 
4842 	/* shift to correct alignment */
4843 	mask <<= shift_width;
4844 
4845 	/* get the current bits from the src bit string */
4846 	src = src_ctx + (ce_info->lsb / 8);
4847 
4848 	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4849 
4850 	/* the data in the memory is stored as little endian so mask it
4851 	 * correctly
4852 	 */
4853 	src_qword &= ~(CPU_TO_LE64(mask));
4854 
4855 	/* get the data back into host order before shifting */
4856 	dest_qword = LE64_TO_CPU(src_qword);
4857 
4858 	dest_qword >>= shift_width;
4859 
4860 	/* get the address from the struct field */
4861 	target = dest_ctx + ce_info->offset;
4862 
4863 	/* put it back in the struct */
4864 	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4865 }
4866 
4867 /**
4868  * ice_get_ctx - extract context bits from a packed structure
4869  * @src_ctx:  pointer to a generic packed context structure
4870  * @dest_ctx: pointer to a generic non-packed context structure
4871  * @ce_info:  a description of the structure to be read from
4872  */
4873 enum ice_status
4874 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4875 {
4876 	int f;
4877 
4878 	for (f = 0; ce_info[f].width; f++) {
4879 		switch (ce_info[f].size_of) {
4880 		case 1:
4881 			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4882 			break;
4883 		case 2:
4884 			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4885 			break;
4886 		case 4:
4887 			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4888 			break;
4889 		case 8:
4890 			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4891 			break;
4892 		default:
4893 			/* nothing to do, just keep going */
4894 			break;
4895 		}
4896 	}
4897 
4898 	return ICE_SUCCESS;
4899 }
4900 
4901 /**
4902  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4903  * @hw: pointer to the HW struct
4904  * @vsi_handle: software VSI handle
4905  * @tc: TC number
4906  * @q_handle: software queue handle
4907  */
4908 struct ice_q_ctx *
4909 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4910 {
4911 	struct ice_vsi_ctx *vsi;
4912 	struct ice_q_ctx *q_ctx;
4913 
4914 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4915 	if (!vsi)
4916 		return NULL;
4917 	if (q_handle >= vsi->num_lan_q_entries[tc])
4918 		return NULL;
4919 	if (!vsi->lan_q_ctx[tc])
4920 		return NULL;
4921 	q_ctx = vsi->lan_q_ctx[tc];
4922 	return &q_ctx[q_handle];
4923 }
4924 
4925 /**
4926  * ice_ena_vsi_txq
4927  * @pi: port information structure
4928  * @vsi_handle: software VSI handle
4929  * @tc: TC number
4930  * @q_handle: software queue handle
4931  * @num_qgrps: Number of added queue groups
4932  * @buf: list of queue groups to be added
4933  * @buf_size: size of buffer for indirect command
4934  * @cd: pointer to command details structure or NULL
4935  *
4936  * This function adds one LAN queue
4937  */
4938 enum ice_status
4939 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4940 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4941 		struct ice_sq_cd *cd)
4942 {
4943 	struct ice_aqc_txsched_elem_data node = { 0 };
4944 	struct ice_sched_node *parent;
4945 	struct ice_q_ctx *q_ctx;
4946 	enum ice_status status;
4947 	struct ice_hw *hw;
4948 
4949 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4950 		return ICE_ERR_CFG;
4951 
4952 	if (num_qgrps > 1 || buf->num_txqs > 1)
4953 		return ICE_ERR_MAX_LIMIT;
4954 
4955 	hw = pi->hw;
4956 
4957 	if (!ice_is_vsi_valid(hw, vsi_handle))
4958 		return ICE_ERR_PARAM;
4959 
4960 	ice_acquire_lock(&pi->sched_lock);
4961 
4962 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4963 	if (!q_ctx) {
4964 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4965 			  q_handle);
4966 		status = ICE_ERR_PARAM;
4967 		goto ena_txq_exit;
4968 	}
4969 
4970 	/* find a parent node */
4971 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4972 					    ICE_SCHED_NODE_OWNER_LAN);
4973 	if (!parent) {
4974 		status = ICE_ERR_PARAM;
4975 		goto ena_txq_exit;
4976 	}
4977 
4978 	buf->parent_teid = parent->info.node_teid;
4979 	node.parent_teid = parent->info.node_teid;
4980 	/* Mark the values in the "generic" section as valid. The default
4981 	 * value in the "generic" section is zero. This means that:
4982 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4983 	 * - Priority 0 among siblings, indicated by Bits 1-3.
4984 	 * - WFQ, indicated by Bit 4.
4985 	 * - An adjustment value of 0 is used in the PSM credit update flow,
4986 	 *   indicated by Bits 5-6.
4987 	 * - Bit 7 is reserved.
4988 	 * Without setting the generic section as valid in valid_sections, the
4989 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4990 	 */
4991 	buf->txqs[0].info.valid_sections =
4992 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4993 		ICE_AQC_ELEM_VALID_EIR;
4994 	buf->txqs[0].info.generic = 0;
4995 	buf->txqs[0].info.cir_bw.bw_profile_idx =
4996 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4997 	buf->txqs[0].info.cir_bw.bw_alloc =
4998 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4999 	buf->txqs[0].info.eir_bw.bw_profile_idx =
5000 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5001 	buf->txqs[0].info.eir_bw.bw_alloc =
5002 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5003 
5004 	/* add the LAN queue */
5005 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5006 	if (status != ICE_SUCCESS) {
5007 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5008 			  LE16_TO_CPU(buf->txqs[0].txq_id),
5009 			  hw->adminq.sq_last_status);
5010 		goto ena_txq_exit;
5011 	}
5012 
5013 	node.node_teid = buf->txqs[0].q_teid;
5014 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5015 	q_ctx->q_handle = q_handle;
5016 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5017 
5018 	/* add a leaf node into scheduler tree queue layer */
5019 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
5020 	if (!status)
5021 		status = ice_sched_replay_q_bw(pi, q_ctx);
5022 
5023 ena_txq_exit:
5024 	ice_release_lock(&pi->sched_lock);
5025 	return status;
5026 }
5027 
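/*
 * Editorial usage sketch for ice_ena_vsi_txq(): enable a single Tx queue
 * on TC 0. The function only accepts one queue group with one queue per
 * call; it fills parent_teid and the scheduler "info" section itself.
 * The packed Tx queue context that normally accompanies the request is
 * elided, and all "example_" names are hypothetical. Compiled out;
 * illustrative only.
 */
#if 0
static enum ice_status
example_ena_one_txq(struct ice_hw *hw, struct ice_port_info *pi,
		    u16 vsi_handle, u16 q_handle, u16 txq_id)
{
	struct ice_aqc_add_tx_qgrp *qg;
	enum ice_status status;
	u16 size;

	size = ice_struct_size(qg, txqs, 1);
	qg = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, size);
	if (!qg)
		return ICE_ERR_NO_MEMORY;

	qg->num_txqs = 1;
	qg->txqs[0].txq_id = CPU_TO_LE16(txq_id);
	/* the packed queue context would be copied into the request here */

	status = ice_ena_vsi_txq(pi, vsi_handle, 0, q_handle, 1, qg, size,
				 NULL);
	ice_free(hw, qg);
	return status;
}
#endif
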
5028 /**
5029  * ice_dis_vsi_txq
5030  * @pi: port information structure
5031  * @vsi_handle: software VSI handle
5032  * @tc: TC number
5033  * @num_queues: number of queues
5034  * @q_handles: pointer to software queue handle array
5035  * @q_ids: pointer to the q_id array
5036  * @q_teids: pointer to queue node teids
5037  * @rst_src: if called due to reset, specifies the reset source
5038  * @vmvf_num: the relative VM or VF number that is undergoing the reset
5039  * @cd: pointer to command details structure or NULL
5040  *
5041  * This function removes queues and their corresponding nodes from the SW DB
5042  */
5043 enum ice_status
5044 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5045 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
5046 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
5047 		struct ice_sq_cd *cd)
5048 {
5049 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5050 	struct ice_aqc_dis_txq_item *qg_list;
5051 	struct ice_q_ctx *q_ctx;
5052 	struct ice_hw *hw;
5053 	u16 i, buf_size;
5054 
5055 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5056 		return ICE_ERR_CFG;
5057 
5058 	hw = pi->hw;
5059 
5060 	if (!num_queues) {
5061 		/* if the queue is already disabled but the disable queue
5062 		 * command still has to be sent to complete a VF reset, then
5063 		 * call ice_aq_dis_lan_txq without any queue information
5064 		 */
5065 		if (rst_src)
5066 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5067 						  vmvf_num, NULL);
5068 		return ICE_ERR_CFG;
5069 	}
5070 
5071 	buf_size = ice_struct_size(qg_list, q_id, 1);
5072 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5073 	if (!qg_list)
5074 		return ICE_ERR_NO_MEMORY;
5075 
5076 	ice_acquire_lock(&pi->sched_lock);
5077 
5078 	for (i = 0; i < num_queues; i++) {
5079 		struct ice_sched_node *node;
5080 
5081 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5082 		if (!node)
5083 			continue;
5084 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5085 		if (!q_ctx) {
5086 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5087 				  q_handles[i]);
5088 			continue;
5089 		}
5090 		if (q_ctx->q_handle != q_handles[i]) {
5091 			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5092 				  q_ctx->q_handle, q_handles[i]);
5093 			continue;
5094 		}
5095 		qg_list->parent_teid = node->info.parent_teid;
5096 		qg_list->num_qs = 1;
5097 		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5098 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5099 					    vmvf_num, cd);
5100 
5101 		if (status != ICE_SUCCESS)
5102 			break;
5103 		ice_free_sched_node(pi, node);
5104 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5105 	}
5106 	ice_release_lock(&pi->sched_lock);
5107 	ice_free(hw, qg_list);
5108 	return status;
5109 }
5110 
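/*
 * Editorial usage sketch for ice_dis_vsi_txq(): tear down the queue
 * enabled in the sketch above, using single-element arrays and no reset
 * source. The q_id and TEID come from values recorded at enable time;
 * names are hypothetical. Compiled out; illustrative only.
 */
#if 0
static enum ice_status
example_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle, u16 q_handle,
		    u16 q_id, u32 q_teid)
{
	return ice_dis_vsi_txq(pi, vsi_handle, 0, 1, &q_handle, &q_id,
			       &q_teid, ICE_NO_RESET, 0, NULL);
}
#endif
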
5111 /**
5112  * ice_cfg_vsi_qs - configure the new/existing VSI queues
5113  * @pi: port information structure
5114  * @vsi_handle: software VSI handle
5115  * @tc_bitmap: TC bitmap
5116  * @maxqs: max queues array per TC
5117  * @owner: LAN or RDMA
5118  *
5119  * This function adds/updates the VSI queues per TC.
5120  */
5121 static enum ice_status
5122 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5123 	       u16 *maxqs, u8 owner)
5124 {
5125 	enum ice_status status = ICE_SUCCESS;
5126 	u8 i;
5127 
5128 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5129 		return ICE_ERR_CFG;
5130 
5131 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5132 		return ICE_ERR_PARAM;
5133 
5134 	ice_acquire_lock(&pi->sched_lock);
5135 
5136 	ice_for_each_traffic_class(i) {
5137 		/* configuration is possible only if TC node is present */
5138 		if (!ice_sched_get_tc_node(pi, i))
5139 			continue;
5140 
5141 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5142 					   ice_is_tc_ena(tc_bitmap, i));
5143 		if (status)
5144 			break;
5145 	}
5146 
5147 	ice_release_lock(&pi->sched_lock);
5148 	return status;
5149 }
5150 
5151 /**
5152  * ice_cfg_vsi_lan - configure VSI LAN queues
5153  * @pi: port information structure
5154  * @vsi_handle: software VSI handle
5155  * @tc_bitmap: TC bitmap
5156  * @max_lanqs: max LAN queues array per TC
5157  *
5158  * This function adds/updates the VSI LAN queues per TC.
5159  */
5160 enum ice_status
5161 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5162 		u16 *max_lanqs)
5163 {
5164 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5165 			      ICE_SCHED_NODE_OWNER_LAN);
5166 }
5167 
5168 /**
5169  * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5170  * @pi: port information structure
5171  * @vsi_handle: software VSI handle
5172  * @tc_bitmap: TC bitmap
5173  * @max_rdmaqs: max RDMA queues array per TC
5174  *
5175  * This function adds/updates the VSI RDMA queues per TC.
5176  */
5177 enum ice_status
5178 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5179 		 u16 *max_rdmaqs)
5180 {
5181 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5182 			      ICE_SCHED_NODE_OWNER_RDMA);
5183 }
5184 
5185 /**
5186  * ice_ena_vsi_rdma_qset
5187  * @pi: port information structure
5188  * @vsi_handle: software VSI handle
5189  * @tc: TC number
5190  * @rdma_qset: pointer to RDMA qset
5191  * @num_qsets: number of RDMA qsets
5192  * @qset_teid: pointer to qset node teids
5193  *
5194  * This function adds RDMA qsets
5195  */
5196 enum ice_status
5197 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5198 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5199 {
5200 	struct ice_aqc_txsched_elem_data node = { 0 };
5201 	struct ice_aqc_add_rdma_qset_data *buf;
5202 	struct ice_sched_node *parent;
5203 	enum ice_status status;
5204 	struct ice_hw *hw;
5205 	u16 i, buf_size;
5206 
5207 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5208 		return ICE_ERR_CFG;
5209 	hw = pi->hw;
5210 
5211 	if (!ice_is_vsi_valid(hw, vsi_handle))
5212 		return ICE_ERR_PARAM;
5213 
5214 	buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5215 	buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5216 	if (!buf)
5217 		return ICE_ERR_NO_MEMORY;
5218 	ice_acquire_lock(&pi->sched_lock);
5219 
5220 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5221 					    ICE_SCHED_NODE_OWNER_RDMA);
5222 	if (!parent) {
5223 		status = ICE_ERR_PARAM;
5224 		goto rdma_error_exit;
5225 	}
5226 	buf->parent_teid = parent->info.node_teid;
5227 	node.parent_teid = parent->info.node_teid;
5228 
5229 	buf->num_qsets = CPU_TO_LE16(num_qsets);
5230 	for (i = 0; i < num_qsets; i++) {
5231 		buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5232 		buf->rdma_qsets[i].info.valid_sections =
5233 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5234 			ICE_AQC_ELEM_VALID_EIR;
5235 		buf->rdma_qsets[i].info.generic = 0;
5236 		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5237 			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5238 		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5239 			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5240 		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5241 			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5242 		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5243 			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5244 	}
5245 	status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5246 	if (status != ICE_SUCCESS) {
5247 		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5248 		goto rdma_error_exit;
5249 	}
5250 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5251 	for (i = 0; i < num_qsets; i++) {
5252 		node.node_teid = buf->rdma_qsets[i].qset_teid;
5253 		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5254 					    &node);
5255 		if (status)
5256 			break;
5257 		qset_teid[i] = LE32_TO_CPU(node.node_teid);
5258 	}
5259 rdma_error_exit:
5260 	ice_release_lock(&pi->sched_lock);
5261 	ice_free(hw, buf);
5262 	return status;
5263 }
5264 
5265 /**
5266  * ice_dis_vsi_rdma_qset - free RDMA resources
5267  * @pi: port_info struct
5268  * @count: number of RDMA qsets to free
5269  * @qset_teid: list of qset node TEIDs
5270  * @q_id: list of queue IDs being disabled
5271  */
5272 enum ice_status
5273 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5274 		      u16 *q_id)
5275 {
5276 	struct ice_aqc_dis_txq_item *qg_list;
5277 	enum ice_status status = ICE_SUCCESS;
5278 	struct ice_hw *hw;
5279 	u16 qg_size;
5280 	int i;
5281 
5282 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5283 		return ICE_ERR_CFG;
5284 
5285 	hw = pi->hw;
5286 
5287 	qg_size = ice_struct_size(qg_list, q_id, 1);
5288 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5289 	if (!qg_list)
5290 		return ICE_ERR_NO_MEMORY;
5291 
5292 	ice_acquire_lock(&pi->sched_lock);
5293 
5294 	for (i = 0; i < count; i++) {
5295 		struct ice_sched_node *node;
5296 
5297 		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5298 		if (!node)
5299 			continue;
5300 
5301 		qg_list->parent_teid = node->info.parent_teid;
5302 		qg_list->num_qs = 1;
5303 		qg_list->q_id[0] =
5304 			CPU_TO_LE16(q_id[i] |
5305 				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5306 
5307 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5308 					    ICE_NO_RESET, 0, NULL);
5309 		if (status)
5310 			break;
5311 
5312 		ice_free_sched_node(pi, node);
5313 	}
5314 
5315 	ice_release_lock(&pi->sched_lock);
5316 	ice_free(hw, qg_list);
5317 	return status;
5318 }
5319 
5320 /**
5321  * ice_is_main_vsi - checks whether the VSI is the main VSI
5322  * @hw: pointer to the HW struct
5323  * @vsi_handle: VSI handle
5324  *
5325  * Checks whether the VSI is the main VSI (the first PF VSI created on
5326  * a given PF).
5327  */
5328 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5329 {
5330 	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5331 }
5332 
5333 /**
5334  * ice_replay_pre_init - replay pre-initialization
5335  * @hw: pointer to the HW struct
5336  * @sw: pointer to switch info struct for which function initializes filters
5337  *
5338  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5339  */
5340 enum ice_status
5341 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5342 {
5343 	enum ice_status status;
5344 	u8 i;
5345 
5346 	/* Delete old entries from the replay filter list head if there are any */
5347 	ice_rm_sw_replay_rule_info(hw, sw);
5348 	/* At the start of replay, move entries into the replay_rules list;
5349 	 * this allows rule entries to be added back to the filt_rules list,
5350 	 * which is the operational list.
5351 	 */
5352 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5353 		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5354 				  &sw->recp_list[i].filt_replay_rules);
5355 	ice_sched_replay_agg_vsi_preinit(hw);
5356 
5357 	status = ice_sched_replay_root_node_bw(hw->port_info);
5358 	if (status)
5359 		return status;
5360 
5361 	return ice_sched_replay_tc_node_bw(hw->port_info);
5362 }
5363 
5364 /**
5365  * ice_replay_vsi - replay VSI configuration
5366  * @hw: pointer to the HW struct
5367  * @vsi_handle: driver VSI handle
5368  *
5369  * Restore all VSI configuration after reset. It is required to call this
5370  * function with main VSI first.
5371  */
5372 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5373 {
5374 	struct ice_switch_info *sw = hw->switch_info;
5375 	struct ice_port_info *pi = hw->port_info;
5376 	enum ice_status status;
5377 
5378 	if (!ice_is_vsi_valid(hw, vsi_handle))
5379 		return ICE_ERR_PARAM;
5380 
5381 	/* Replay pre-initialization if there is any */
5382 	if (ice_is_main_vsi(hw, vsi_handle)) {
5383 		status = ice_replay_pre_init(hw, sw);
5384 		if (status)
5385 			return status;
5386 	}
5387 	/* Replay per VSI all RSS configurations */
5388 	status = ice_replay_rss_cfg(hw, vsi_handle);
5389 	if (status)
5390 		return status;
5391 	/* Replay per VSI all filters */
5392 	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5393 	if (!status)
5394 		status = ice_replay_vsi_agg(hw, vsi_handle);
5395 	return status;
5396 }
5397 
5398 /**
5399  * ice_replay_post - post replay configuration cleanup
5400  * @hw: pointer to the HW struct
5401  *
5402  * Post replay cleanup.
5403  */
5404 void ice_replay_post(struct ice_hw *hw)
5405 {
5406 	/* Delete old entries from replay filter list head */
5407 	ice_rm_all_sw_replay_rule_info(hw);
5408 	ice_sched_replay_agg(hw);
5409 }
5410 
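/*
 * Editorial sketch of the post-reset replay ordering required by
 * ice_replay_vsi(): the main VSI must be replayed first so that
 * ice_replay_pre_init() runs before any other VSI is restored, and
 * ice_replay_post() cleans up afterwards. Iterating from handle 0
 * assumes handle 0 is ICE_MAIN_VSI_HANDLE. Compiled out; illustrative
 * only.
 */
#if 0
static void example_replay_all_vsi(struct ice_hw *hw, u16 num_vsi)
{
	u16 handle;

	/* handle 0 is assumed to be ICE_MAIN_VSI_HANDLE */
	for (handle = 0; handle < num_vsi; handle++) {
		if (!ice_is_vsi_valid(hw, handle))
			continue;
		if (ice_replay_vsi(hw, handle))
			break;
	}
	ice_replay_post(hw);
}
#endif
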
5411 /**
5412  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5413  * @hw: ptr to the hardware info
5414  * @reg: offset of 64 bit HW register to read from
5415  * @prev_stat_loaded: bool to specify if previous stats are loaded
5416  * @prev_stat: ptr to previous loaded stat value
5417  * @cur_stat: ptr to current stat value
5418  */
5419 void
5420 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5421 		  u64 *prev_stat, u64 *cur_stat)
5422 {
5423 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5424 
5425 	/* device stats are not reset at PFR, so they likely will not be zeroed
5426 	 * when the driver starts. Thus, save the value from the first read
5427 	 * without adding to the statistic value so that we report stats which
5428 	 * count up from zero.
5429 	 */
5430 	if (!prev_stat_loaded) {
5431 		*prev_stat = new_data;
5432 		return;
5433 	}
5434 
5435 	/* Calculate the difference between the new and old values, and then
5436 	 * add it to the software stat value.
5437 	 */
5438 	if (new_data >= *prev_stat)
5439 		*cur_stat += new_data - *prev_stat;
5440 	else
5441 		/* to manage the potential roll-over */
5442 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5443 
5444 	/* Update the previously stored value to prepare for next read */
5445 	*prev_stat = new_data;
5446 }
5447 
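/*
 * Editorial sketch for ice_stat_update40(): periodic polling of one
 * 40-bit counter. On rollover the helper compensates, e.g. with
 * *prev_stat = 0xFFFFFFFFFE and a fresh read of 0x1 it accumulates
 * (0x1 + 2^40) - 0xFFFFFFFFFE = 3. The register choice (GLPRT_GORCL,
 * good octets received) and the caller-held state are assumptions of
 * this sketch. Compiled out; illustrative only.
 */
#if 0
static void
example_poll_rx_bytes(struct ice_hw *hw, bool *offsets_loaded,
		      u64 *prev_rx_bytes, u64 *rx_bytes)
{
	ice_stat_update40(hw, GLPRT_GORCL(hw->port_info->lport),
			  *offsets_loaded, prev_rx_bytes, rx_bytes);
	/* first call only captures the baseline; later calls accumulate */
	*offsets_loaded = true;
}
#endif
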
5448 /**
5449  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5450  * @hw: ptr to the hardware info
5451  * @reg: offset of HW register to read from
5452  * @prev_stat_loaded: bool to specify if previous stats are loaded
5453  * @prev_stat: ptr to previous loaded stat value
5454  * @cur_stat: ptr to current stat value
5455  */
5456 void
5457 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5458 		  u64 *prev_stat, u64 *cur_stat)
5459 {
5460 	u32 new_data;
5461 
5462 	new_data = rd32(hw, reg);
5463 
5464 	/* device stats are not reset at PFR, so they likely will not be zeroed
5465 	 * when the driver starts. Thus, save the value from the first read
5466 	 * without adding to the statistic value so that we report stats which
5467 	 * count up from zero.
5468 	 */
5469 	if (!prev_stat_loaded) {
5470 		*prev_stat = new_data;
5471 		return;
5472 	}
5473 
5474 	/* Calculate the difference between the new and old values, and then
5475 	 * add it to the software stat value.
5476 	 */
5477 	if (new_data >= *prev_stat)
5478 		*cur_stat += new_data - *prev_stat;
5479 	else
5480 		/* to manage the potential roll-over */
5481 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5482 
5483 	/* Update the previously stored value to prepare for next read */
5484 	*prev_stat = new_data;
5485 }
5486 
5487 /**
5488  * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5489  * @hw: ptr to the hardware info
5490  * @vsi_handle: VSI handle
5491  * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5492  * @cur_stats: ptr to current stats structure
5493  *
5494  * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
5495  * thus cannot be read using the normal ice_stat_update32 function.
5496  *
5497  * Read the GLV_REPC register associated with the given VSI, and update the
5498  * rx_no_desc and rx_error values in the ice_eth_stats structure.
5499  *
5500  * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5501  * cleared each time it's read.
5502  *
5503  * Note that the GLV_RDPC register also counts the causes that would trigger
5504  * GLV_REPC. However, it does not give the finer grained detail about why the
5505  * packets are being dropped. The GLV_REPC values can be used to distinguish
5506  * whether Rx packets are dropped due to errors or due to no available
5507  * descriptors.
5508  */
5509 void
5510 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5511 		     struct ice_eth_stats *cur_stats)
5512 {
5513 	u16 vsi_num, no_desc, error_cnt;
5514 	u32 repc;
5515 
5516 	if (!ice_is_vsi_valid(hw, vsi_handle))
5517 		return;
5518 
5519 	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5520 
5521 	/* If we haven't loaded stats yet, just clear the current value */
5522 	if (!prev_stat_loaded) {
5523 		wr32(hw, GLV_REPC(vsi_num), 0);
5524 		return;
5525 	}
5526 
5527 	repc = rd32(hw, GLV_REPC(vsi_num));
5528 	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5529 	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5530 
5531 	/* Clear the count by writing to the stats register */
5532 	wr32(hw, GLV_REPC(vsi_num), 0);
5533 
5534 	cur_stats->rx_no_desc += no_desc;
5535 	cur_stats->rx_errors += error_cnt;
5536 }
5537 
5538 /**
5539  * ice_aq_alternate_write
5540  * @hw: pointer to the hardware structure
5541  * @reg_addr0: address of first dword to be written
5542  * @reg_val0: value to be written under 'reg_addr0'
5543  * @reg_addr1: address of second dword to be written
5544  * @reg_val1: value to be written under 'reg_addr1'
5545  *
5546  * Write one or two dwords to the alternate structure. Fields are
5547  * indicated by the 'reg_addr0' and 'reg_addr1' register numbers.
5548  */
5549 enum ice_status
5550 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
5551 		       u32 reg_addr1, u32 reg_val1)
5552 {
5553 	struct ice_aqc_read_write_alt_direct *cmd;
5554 	struct ice_aq_desc desc;
5555 	enum ice_status status;
5556 
5557 	cmd = &desc.params.read_write_alt_direct;
5558 
5559 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
5560 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5561 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5562 	cmd->dword0_value = CPU_TO_LE32(reg_val0);
5563 	cmd->dword1_value = CPU_TO_LE32(reg_val1);
5564 
5565 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5566 
5567 	return status;
5568 }
5569 
5570 /**
5571  * ice_aq_alternate_read
5572  * @hw: pointer to the hardware structure
5573  * @reg_addr0: address of first dword to be read
5574  * @reg_val0: pointer for data read from 'reg_addr0'
5575  * @reg_addr1: address of second dword to be read
5576  * @reg_val1: pointer for data read from 'reg_addr1'
5577  *
5578  * Read one or two dwords from the alternate structure. Fields are indicated
5579  * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
5580  * pointer is not passed, only the register at 'reg_addr0' is read.
5581  */
5582 enum ice_status
5583 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
5584 		      u32 reg_addr1, u32 *reg_val1)
5585 {
5586 	struct ice_aqc_read_write_alt_direct *cmd;
5587 	struct ice_aq_desc desc;
5588 	enum ice_status status;
5589 
5590 	cmd = &desc.params.read_write_alt_direct;
5591 
5592 	if (!reg_val0)
5593 		return ICE_ERR_PARAM;
5594 
5595 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
5596 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5597 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5598 
5599 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5600 
5601 	if (status == ICE_SUCCESS) {
5602 		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
5603 
5604 		if (reg_val1)
5605 			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
5606 	}
5607 
5608 	return status;
5609 }
5610 
5611 /**
5612  *  ice_aq_alternate_write_done
5613  *  @hw: pointer to the HW structure.
5614  *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
5615  *  @reset_needed: set to true if SW should trigger a GLOBAL reset
5616  *
5617  *  Indicates to the FW that alternate structures have been changed.
5618  */
5619 enum ice_status
5620 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
5621 {
5622 	struct ice_aqc_done_alt_write *cmd;
5623 	struct ice_aq_desc desc;
5624 	enum ice_status status;
5625 
5626 	cmd = &desc.params.done_alt_write;
5627 
5628 	if (!reset_needed)
5629 		return ICE_ERR_PARAM;
5630 
5631 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
5632 	cmd->flags = bios_mode;
5633 
5634 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5635 	if (!status)
5636 		*reset_needed = (LE16_TO_CPU(cmd->flags) &
5637 				 ICE_AQC_RESP_RESET_NEEDED) != 0;
5638 
5639 	return status;
5640 }
5641 
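/*
 * Editorial sketch of the alternate-structure update flow: write the
 * dwords, tell FW the update is done, and honor reset_needed. The
 * register addresses/values and the bios_mode of 0 are placeholders,
 * not meaningful settings. Compiled out; illustrative only.
 */
#if 0
static enum ice_status
example_update_alt_struct(struct ice_hw *hw, u32 addr0, u32 val0,
			  u32 addr1, u32 val1)
{
	bool reset_needed = false;
	enum ice_status status;

	status = ice_aq_alternate_write(hw, addr0, val0, addr1, val1);
	if (status)
		return status;

	status = ice_aq_alternate_write_done(hw, 0 /* bios_mode */,
					     &reset_needed);
	if (!status && reset_needed)
		ice_debug(hw, ICE_DBG_INIT, "alternate write needs GLOBR\n");

	return status;
}
#endif
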
5642 /**
5643  *  ice_aq_alternate_clear
5644  *  @hw: pointer to the HW structure.
5645  *
5646  *  Clear the alternate structures of the port from which the function
5647  *  is called.
5648  */
5649 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
5650 {
5651 	struct ice_aq_desc desc;
5652 	enum ice_status status;
5653 
5654 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
5655 
5656 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5657 
5658 	return status;
5659 }
5660 
5661 /**
5662  * ice_sched_query_elem - query element information from HW
5663  * @hw: pointer to the HW struct
5664  * @node_teid: node TEID to be queried
5665  * @buf: buffer to element information
5666  *
5667  * This function queries HW element information
5668  */
5669 enum ice_status
5670 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5671 		     struct ice_aqc_txsched_elem_data *buf)
5672 {
5673 	u16 buf_size, num_elem_ret = 0;
5674 	enum ice_status status;
5675 
5676 	buf_size = sizeof(*buf);
5677 	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5678 	buf->node_teid = CPU_TO_LE32(node_teid);
5679 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5680 					  NULL);
5681 	if (status != ICE_SUCCESS || num_elem_ret != 1)
5682 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5683 	return status;
5684 }
5685 
5686 /**
5687  * ice_get_fw_mode - returns FW mode
5688  * @hw: pointer to the HW struct
5689  */
5690 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5691 {
5692 #define ICE_FW_MODE_DBG_M BIT(0)
5693 #define ICE_FW_MODE_REC_M BIT(1)
5694 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5695 	u32 fw_mode;
5696 
5697 	/* check the current FW mode */
5698 	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5699 	if (fw_mode & ICE_FW_MODE_DBG_M)
5700 		return ICE_FW_MODE_DBG;
5701 	else if (fw_mode & ICE_FW_MODE_REC_M)
5702 		return ICE_FW_MODE_REC;
5703 	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5704 		return ICE_FW_MODE_ROLLBACK;
5705 	else
5706 		return ICE_FW_MODE_NORMAL;
5707 }
5708 
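/*
 * Editorial sketch for ice_get_fw_mode(): a caller would typically warn
 * when FW is not in its normal mode. Compiled out; illustrative only.
 */
#if 0
static void example_check_fw_mode(struct ice_hw *hw)
{
	if (ice_get_fw_mode(hw) != ICE_FW_MODE_NORMAL)
		ice_debug(hw, ICE_DBG_INIT,
			  "FW is in debug/recovery/rollback mode\n");
}
#endif
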
5709 /**
5710  * ice_get_cur_lldp_persist_status
5711  * @hw: pointer to the HW struct
5712  * @lldp_status: return value of LLDP persistent status
5713  *
5714  * Get the current LLDP persistent status
5715  */
5716 enum ice_status
5717 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5718 {
5719 	struct ice_port_info *pi = hw->port_info;
5720 	enum ice_status ret;
5721 	__le32 raw_data;
5722 	u32 data, mask;
5723 
5724 	if (!lldp_status)
5725 		return ICE_ERR_BAD_PTR;
5726 
5727 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5728 	if (ret)
5729 		return ret;
5730 
5731 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
5732 			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
5733 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
5734 			      false, true, NULL);
5735 	if (!ret) {
5736 		data = LE32_TO_CPU(raw_data);
5737 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5738 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5739 		data = data & mask;
5740 		*lldp_status = data >>
5741 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5742 	}
5743 
5744 	ice_release_nvm(hw);
5745 
5746 	return ret;
5747 }
5748 
5749 /**
5750  * ice_get_dflt_lldp_persist_status
5751  * @hw: pointer to the HW struct
5752  * @lldp_status: return value of LLDP persistent status
5753  *
5754  * Get the default LLDP persistent status
5755  */
5756 enum ice_status
5757 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5758 {
5759 	struct ice_port_info *pi = hw->port_info;
5760 	u32 data, mask, loc_data, loc_data_tmp;
5761 	enum ice_status ret;
5762 	__le16 loc_raw_data;
5763 	__le32 raw_data;
5764 
5765 	if (!lldp_status)
5766 		return ICE_ERR_BAD_PTR;
5767 
5768 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5769 	if (ret)
5770 		return ret;
5771 
5772 	/* Read the offset of EMP_SR_PTR */
5773 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5774 			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5775 			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5776 			      &loc_raw_data, false, true, NULL);
5777 	if (ret)
5778 		goto exit;
5779 
5780 	loc_data = LE16_TO_CPU(loc_raw_data);
5781 	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5782 		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5783 		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5784 	} else {
5785 		loc_data *= ICE_AQC_NVM_WORD_UNIT;
5786 	}
5787 
5788 	/* Read the offset of LLDP configuration pointer */
5789 	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5790 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5791 			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5792 			      false, true, NULL);
5793 	if (ret)
5794 		goto exit;
5795 
5796 	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5797 	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5798 	loc_data += loc_data_tmp;
5799 
5800 	/* We need to skip LLDP configuration section length (2 bytes) */
5801 	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5802 
5803 	/* Read the LLDP default configuration */
5804 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5805 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5806 			      true, NULL);
5807 	if (!ret) {
5808 		data = LE32_TO_CPU(raw_data);
5809 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5810 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5811 		data = data & mask;
5812 		*lldp_status = data >>
5813 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5814 	}
5815 
5816 exit:
5817 	ice_release_nvm(hw);
5818 
5819 	return ret;
5820 }
5821 
5822 /**
5823  * ice_aq_read_i2c
5824  * @hw: pointer to the hw struct
5825  * @topo_addr: topology address for a device to communicate with
5826  * @bus_addr: 7-bit I2C bus address
5827  * @addr: I2C memory address (I2C offset) with up to 16 bits
5828  * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
5829  *			    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
5830  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5831  * @cd: pointer to command details structure or NULL
5832  *
5833  * Read I2C (0x06E2)
5834  */
5835 enum ice_status
5836 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5837 		u16 bus_addr, __le16 addr, u8 params, u8 *data,
5838 		struct ice_sq_cd *cd)
5839 {
5840 	struct ice_aq_desc desc = { 0 };
5841 	struct ice_aqc_i2c *cmd;
5842 	enum ice_status status;
5843 	u8 data_size;
5844 
5845 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5846 	cmd = &desc.params.read_write_i2c;
5847 
5848 	if (!data)
5849 		return ICE_ERR_PARAM;
5850 
5851 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5852 
5853 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5854 	cmd->topo_addr = topo_addr;
5855 	cmd->i2c_params = params;
5856 	cmd->i2c_addr = addr;
5857 
5858 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5859 	if (!status) {
5860 		struct ice_aqc_read_i2c_resp *resp;
5861 		u8 i;
5862 
5863 		resp = &desc.params.read_i2c_resp;
5864 		for (i = 0; i < data_size; i++) {
5865 			*data = resp->i2c_data[i];
5866 			data++;
5867 		}
5868 	}
5869 
5870 	return status;
5871 }
5872 
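/*
 * Editorial sketch for ice_aq_read_i2c(): read a single byte from a
 * device behind the link topology, e.g. a module EEPROM. Building a
 * real ice_aqc_link_topo_addr is elided; the zeroed one here is only a
 * placeholder. Compiled out; illustrative only.
 */
#if 0
static enum ice_status
example_i2c_read_byte(struct ice_hw *hw, u16 bus_addr, u16 offset, u8 *byte)
{
	struct ice_aqc_link_topo_addr topo_addr = { 0 };
	u8 params;

	/* bits [3:0] of params carry the data size; request one byte */
	params = 1 << ICE_AQC_I2C_DATA_SIZE_S;

	return ice_aq_read_i2c(hw, topo_addr, bus_addr, CPU_TO_LE16(offset),
			       params, byte, NULL);
}
#endif
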
5873 /**
5874  * ice_aq_write_i2c
5875  * @hw: pointer to the hw struct
5876  * @topo_addr: topology address for a device to communicate with
5877  * @bus_addr: 7-bit I2C bus address
5878  * @addr: I2C memory address (I2C offset) with up to 16 bits
5879  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-4 bytes)
5880  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5881  * @cd: pointer to command details structure or NULL
5882  *
5883  * Write I2C (0x06E3)
5884  */
5885 enum ice_status
5886 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5887 		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5888 		 struct ice_sq_cd *cd)
5889 {
5890 	struct ice_aq_desc desc = { 0 };
5891 	struct ice_aqc_i2c *cmd;
5892 	u8 i, data_size;
5893 
5894 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
5895 	cmd = &desc.params.read_write_i2c;
5896 
5897 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5898 
5899 	/* data_size limited to 4 */
5900 	if (data_size > 4)
5901 		return ICE_ERR_PARAM;
5902 
5903 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5904 	cmd->topo_addr = topo_addr;
5905 	cmd->i2c_params = params;
5906 	cmd->i2c_addr = addr;
5907 
5908 	for (i = 0; i < data_size; i++) {
5909 		cmd->i2c_data[i] = *data;
5910 		data++;
5911 	}
5912 
5913 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5914 }
5915 
5916 /**
5917  * ice_aq_set_gpio
5918  * @hw: pointer to the hw struct
5919  * @gpio_ctrl_handle: GPIO controller node handle
5920  * @pin_idx: IO number of the GPIO that needs to be set
5921  * @value: SW provided IO value to set in the LSB
5922  * @cd: pointer to command details structure or NULL
5923  *
5924  * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
5925  */
5926 enum ice_status
5927 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
5928 		struct ice_sq_cd *cd)
5929 {
5930 	struct ice_aqc_gpio *cmd;
5931 	struct ice_aq_desc desc;
5932 
5933 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
5934 	cmd = &desc.params.read_write_gpio;
5935 	cmd->gpio_ctrl_handle = gpio_ctrl_handle;
5936 	cmd->gpio_num = pin_idx;
5937 	cmd->gpio_val = value ? 1 : 0;
5938 
5939 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5940 }
5941 
5942 /**
5943  * ice_aq_get_gpio
5944  * @hw: pointer to the hw struct
5945  * @gpio_ctrl_handle: GPIO controller node handle
5946  * @pin_idx: IO number of the GPIO whose value is to be read
5947  * @value: IO value read
5948  * @cd: pointer to command details structure or NULL
5949  *
5950  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
5951  * the topology
5952  */
5953 enum ice_status
5954 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
5955 		bool *value, struct ice_sq_cd *cd)
5956 {
5957 	struct ice_aqc_gpio *cmd;
5958 	struct ice_aq_desc desc;
5959 	enum ice_status status;
5960 
5961 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
5962 	cmd = &desc.params.read_write_gpio;
5963 	cmd->gpio_ctrl_handle = gpio_ctrl_handle;
5964 	cmd->gpio_num = pin_idx;
5965 
5966 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5967 	if (status)
5968 		return status;
5969 
5970 	*value = !!cmd->gpio_val;
5971 	return ICE_SUCCESS;
5972 }
5973 
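/*
 * Editorial sketch for the GPIO pair above: drive a pin high, then read
 * it back. The controller handle and pin index are assumed to come from
 * the link topology. Compiled out; illustrative only.
 */
#if 0
static enum ice_status
example_gpio_set_and_check(struct ice_hw *hw, u16 gpio_ctrl_handle,
			   u8 pin_idx, bool *value)
{
	enum ice_status status;

	status = ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, true, NULL);
	if (status)
		return status;

	return ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, value, NULL);
}
#endif
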
5974 /**
5975  * ice_fw_supports_link_override
5976  * @hw: pointer to the hardware structure
5977  *
5978  * Checks if the firmware supports link override
5979  */
5980 bool ice_fw_supports_link_override(struct ice_hw *hw)
5981 {
5982 	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5983 		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5984 			return true;
5985 		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5986 		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5987 			return true;
5988 	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5989 		return true;
5990 	}
5991 
5992 	return false;
5993 }
5994 
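/*
 * Editorial note: ice_fw_supports_link_override() and the other
 * ice_fw_supports_*()/ice_is_fw_*_supported() predicates below all
 * implement an "API version >= MAJ.MIN.PATCH" comparison. A
 * hypothetical generic form of that comparison looks like this.
 * Compiled out; illustrative only.
 */
#if 0
static bool
example_fw_api_ver_ge(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver != maj)
		return hw->api_maj_ver > maj;
	if (hw->api_min_ver != min)
		return hw->api_min_ver > min;
	return hw->api_patch >= patch;
}
#endif
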
5995 /**
5996  * ice_get_link_default_override
5997  * @ldo: pointer to the link default override struct
5998  * @pi: pointer to the port info struct
5999  *
6000  * Gets the link default override for a port
6001  */
6002 enum ice_status
6003 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6004 			      struct ice_port_info *pi)
6005 {
6006 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
6007 	struct ice_hw *hw = pi->hw;
6008 	enum ice_status status;
6009 
6010 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6011 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6012 	if (status) {
6013 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6014 		return status;
6015 	}
6016 
6017 	/* Each port has its own config; calculate for our port */
6018 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6019 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6020 
6021 	/* link options first */
6022 	status = ice_read_sr_word(hw, tlv_start, &buf);
6023 	if (status) {
6024 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6025 		return status;
6026 	}
6027 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6028 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6029 		ICE_LINK_OVERRIDE_PHY_CFG_S;
6030 
6031 	/* link PHY config */
6032 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6033 	status = ice_read_sr_word(hw, offset, &buf);
6034 	if (status) {
6035 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6036 		return status;
6037 	}
6038 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6039 
6040 	/* PHY types low */
6041 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6042 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6043 		status = ice_read_sr_word(hw, (offset + i), &buf);
6044 		if (status) {
6045 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6046 			return status;
6047 		}
6048 		/* shift 16 bits at a time to fill 64 bits */
6049 		ldo->phy_type_low |= ((u64)buf << (i * 16));
6050 	}
6051 
6052 	/* PHY types high */
6053 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6054 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6055 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6056 		status = ice_read_sr_word(hw, (offset + i), &buf);
6057 		if (status) {
6058 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6059 			return status;
6060 		}
6061 		/* shift 16 bits at a time to fill 64 bits */
6062 		ldo->phy_type_high |= ((u64)buf << (i * 16));
6063 	}
6064 
6065 	return status;
6066 }
6067 
6068 /**
6069  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6070  * @caps: get PHY capability data
6071  */
6072 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6073 {
6074 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6075 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6076 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
6077 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
6078 		return true;
6079 
6080 	return false;
6081 }
6082 
6083 /**
6084  * ice_is_fw_health_report_supported
6085  * @hw: pointer to the hardware structure
6086  *
6087  * Return true if firmware supports health status reports,
6088  * false otherwise
6089  */
6090 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6091 {
6092 	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6093 		return true;
6094 
6095 	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6096 		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6097 			return true;
6098 		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6099 		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6100 			return true;
6101 	}
6102 
6103 	return false;
6104 }
6105 
6106 /**
6107  * ice_aq_set_health_status_config - Configure FW health events
6108  * @hw: pointer to the HW struct
6109  * @event_source: type of diagnostic events to enable
6110  * @cd: pointer to command details structure or NULL
6111  *
6112  * Configure the health status event types that the firmware will send to this
6113  * PF. The supported event types are: PF-specific, all PFs, and global
6114  */
6115 enum ice_status
6116 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6117 				struct ice_sq_cd *cd)
6118 {
6119 	struct ice_aqc_set_health_status_config *cmd;
6120 	struct ice_aq_desc desc;
6121 
6122 	cmd = &desc.params.set_health_status_config;
6123 
6124 	ice_fill_dflt_direct_cmd_desc(&desc,
6125 				      ice_aqc_opc_set_health_status_config);
6126 
6127 	cmd->event_source = event_source;
6128 
6129 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6130 }
6131 
6132 /**
6133  * ice_aq_get_port_options
6134  * @hw: pointer to the hw struct
6135  * @options: buffer for the resultant port options
6136  * @option_count: input - size of the buffer in port options structures,
6137  *                output - number of returned port options
6138  * @lport: logical port to call the command with (optional)
6139  * @lport_valid: when false, FW uses the port owned by the PF instead of
6140  *               lport; it must be true when the PF owns more than one port
6141  * @active_option_idx: index of active port option in returned buffer
6142  * @active_option_valid: active option in returned buffer is valid
6143  *
6144  * Calls Get Port Options AQC (0x06ea) and verifies result.
6145  */
6146 enum ice_status
6147 ice_aq_get_port_options(struct ice_hw *hw,
6148 			struct ice_aqc_get_port_options_elem *options,
6149 			u8 *option_count, u8 lport, bool lport_valid,
6150 			u8 *active_option_idx, bool *active_option_valid)
6151 {
6152 	struct ice_aqc_get_port_options *cmd;
6153 	struct ice_aq_desc desc;
6154 	enum ice_status status;
6155 	u8 pmd_count;
6156 	u8 max_speed;
6157 	u8 i;
6158 
6159 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6160 
6161 	/* options buffer must be able to hold the max number of returned options */
6162 	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
6163 		return ICE_ERR_PARAM;
6164 
6165 	cmd = &desc.params.get_port_options;
6166 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
6167 
6168 	if (lport_valid)
6169 		cmd->lport_num = lport;
6170 	cmd->lport_num_valid = lport_valid;
6171 
6172 	status = ice_aq_send_cmd(hw, &desc, options,
6173 				 *option_count * sizeof(*options), NULL);
6174 	if (status != ICE_SUCCESS)
6175 		return status;
6176 
6177 	/* verify direct FW response & set output parameters */
6178 	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6179 	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6180 	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6181 	if (*active_option_valid) {
6182 		*active_option_idx = cmd->port_options &
6183 				     ICE_AQC_PORT_OPT_ACTIVE_M;
6184 		if (*active_option_idx > (*option_count - 1))
6185 			return ICE_ERR_OUT_OF_RANGE;
6186 		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6187 			  *active_option_idx);
6188 	}
6189 
6190 	/* verify indirect FW response & mask output options fields */
6191 	for (i = 0; i < *option_count; i++) {
6192 		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
6193 		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
6194 		pmd_count = options[i].pmd;
6195 		max_speed = options[i].max_lane_speed;
6196 		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6197 			  pmd_count, max_speed);
6198 
6199 		/* check only entries containing valid max pmd speed values;
6200 		 * other reserved values may be returned when the logical port
6201 		 * used is unrelated to a specific option
6202 		 */
6203 		if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) {
6204 			if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV)
6205 				return ICE_ERR_OUT_OF_RANGE;
6206 			if (pmd_count > 2 &&
6207 			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G)
6208 				return ICE_ERR_CFG;
6209 			if (pmd_count > 7 &&
6210 			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G)
6211 				return ICE_ERR_CFG;
6212 		}
6213 	}
6214 
6215 	return ICE_SUCCESS;
6216 }
6217 
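/*
 * Editorial usage sketch for ice_aq_get_port_options(): query with a
 * buffer sized to the maximum option count the command can return and
 * let FW pick the port (lport_valid = false, single-port PF assumed).
 * Compiled out; illustrative only.
 */
#if 0
static enum ice_status
example_query_port_options(struct ice_hw *hw)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_COUNT_M];
	u8 option_count = ICE_AQC_PORT_OPT_COUNT_M;
	bool active_valid = false;
	u8 active_idx = 0;
	enum ice_status status;

	status = ice_aq_get_port_options(hw, options, &option_count, 0,
					 false, &active_idx, &active_valid);
	if (!status && active_valid)
		ice_debug(hw, ICE_DBG_PHY, "active option %d of %d\n",
			  active_idx, option_count);
	return status;
}
#endif
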
6218 /**
6219  * ice_aq_set_lldp_mib - Set the LLDP MIB
6220  * @hw: pointer to the HW struct
6221  * @mib_type: Local, Remote or both Local and Remote MIBs
6222  * @buf: pointer to the caller-supplied buffer to store the MIB block
6223  * @buf_size: size of the buffer (in bytes)
6224  * @cd: pointer to command details structure or NULL
6225  *
6226  * Set the LLDP MIB. (0x0A08)
6227  */
6228 enum ice_status
6229 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6230 		    struct ice_sq_cd *cd)
6231 {
6232 	struct ice_aqc_lldp_set_local_mib *cmd;
6233 	struct ice_aq_desc desc;
6234 
6235 	cmd = &desc.params.lldp_set_mib;
6236 
6237 	if (buf_size == 0 || !buf)
6238 		return ICE_ERR_PARAM;
6239 
6240 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6241 
6242 	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
6243 	desc.datalen = CPU_TO_LE16(buf_size);
6244 
6245 	cmd->type = mib_type;
6246 	cmd->length = CPU_TO_LE16(buf_size);
6247 
6248 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6249 }
6250 
6251 /**
6252  * ice_fw_supports_lldp_fltr_ctrl - check if the FW API version supports lldp_fltr_ctrl
6253  * @hw: pointer to HW struct
6254  */
6255 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6256 {
6257 	if (hw->mac_type != ICE_MAC_E810)
6258 		return false;
6259 
6260 	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
6261 		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
6262 			return true;
6263 		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
6264 		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
6265 			return true;
6266 	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
6267 		return true;
6268 	}
6269 	return false;
6270 }
6271 
6272 /**
6273  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6274  * @hw: pointer to HW struct
6275  * @vsi_num: absolute HW index for VSI
6276  * @add: boolean indicating whether to add or remove the filter
6277  */
6278 enum ice_status
6279 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6280 {
6281 	struct ice_aqc_lldp_filter_ctrl *cmd;
6282 	struct ice_aq_desc desc;
6283 
6284 	cmd = &desc.params.lldp_filter_ctrl;
6285 
6286 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6287 
6288 	if (add)
6289 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6290 	else
6291 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6292 
6293 	cmd->vsi_num = CPU_TO_LE16(vsi_num);
6294 
6295 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6296 }
6297 
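/*
 * Editorial usage sketch: gate ice_lldp_fltr_add_remove() on the FW
 * capability check above before adding an Rx filter for a VSI.
 * Compiled out; illustrative only.
 */
#if 0
static enum ice_status
example_add_lldp_fltr(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return ICE_ERR_NOT_SUPPORTED;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}
#endif
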
6298 /**
6299  * ice_fw_supports_report_dflt_cfg
6300  * @hw: pointer to the hardware structure
6301  *
6302  * Checks if the firmware supports report default configuration
6303  */
6304 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6305 {
6306 	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
6307 		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
6308 			return true;
6309 		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
6310 		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
6311 			return true;
6312 	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
6313 		return true;
6314 	}
6315 	return false;
6316 }
6317 
6318 /**
6319  * ice_is_fw_auto_drop_supported
6320  * @hw: pointer to the hardware structure
6321  *
6322  * Checks if the firmware supports auto drop feature
6323  */
6324 bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
6325 {
6326 	if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
6327 	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
6328 		return true;
6329 	return false;
6330 }
6331