xref: /freebsd/sys/dev/ice/ice_common.c (revision b17b639832e707aab0e9514cf94727498e2d67bd)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2023, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 #include "ice_common.h"
34 #include "ice_sched.h"
35 #include "ice_adminq_cmd.h"
36 
37 #include "ice_flow.h"
38 #include "ice_switch.h"
39 
40 #define ICE_PF_RESET_WAIT_COUNT	500
41 
42 static const char * const ice_link_mode_str_low[] = {
43 	ice_arr_elem_idx(0, "100BASE_TX"),
44 	ice_arr_elem_idx(1, "100M_SGMII"),
45 	ice_arr_elem_idx(2, "1000BASE_T"),
46 	ice_arr_elem_idx(3, "1000BASE_SX"),
47 	ice_arr_elem_idx(4, "1000BASE_LX"),
48 	ice_arr_elem_idx(5, "1000BASE_KX"),
49 	ice_arr_elem_idx(6, "1G_SGMII"),
50 	ice_arr_elem_idx(7, "2500BASE_T"),
51 	ice_arr_elem_idx(8, "2500BASE_X"),
52 	ice_arr_elem_idx(9, "2500BASE_KX"),
53 	ice_arr_elem_idx(10, "5GBASE_T"),
54 	ice_arr_elem_idx(11, "5GBASE_KR"),
55 	ice_arr_elem_idx(12, "10GBASE_T"),
56 	ice_arr_elem_idx(13, "10G_SFI_DA"),
57 	ice_arr_elem_idx(14, "10GBASE_SR"),
58 	ice_arr_elem_idx(15, "10GBASE_LR"),
59 	ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
60 	ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
61 	ice_arr_elem_idx(18, "10G_SFI_C2C"),
62 	ice_arr_elem_idx(19, "25GBASE_T"),
63 	ice_arr_elem_idx(20, "25GBASE_CR"),
64 	ice_arr_elem_idx(21, "25GBASE_CR_S"),
65 	ice_arr_elem_idx(22, "25GBASE_CR1"),
66 	ice_arr_elem_idx(23, "25GBASE_SR"),
67 	ice_arr_elem_idx(24, "25GBASE_LR"),
68 	ice_arr_elem_idx(25, "25GBASE_KR"),
69 	ice_arr_elem_idx(26, "25GBASE_KR_S"),
70 	ice_arr_elem_idx(27, "25GBASE_KR1"),
71 	ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
72 	ice_arr_elem_idx(29, "25G_AUI_C2C"),
73 	ice_arr_elem_idx(30, "40GBASE_CR4"),
74 	ice_arr_elem_idx(31, "40GBASE_SR4"),
75 	ice_arr_elem_idx(32, "40GBASE_LR4"),
76 	ice_arr_elem_idx(33, "40GBASE_KR4"),
77 	ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
78 	ice_arr_elem_idx(35, "40G_XLAUI"),
79 	ice_arr_elem_idx(36, "50GBASE_CR2"),
80 	ice_arr_elem_idx(37, "50GBASE_SR2"),
81 	ice_arr_elem_idx(38, "50GBASE_LR2"),
82 	ice_arr_elem_idx(39, "50GBASE_KR2"),
83 	ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
84 	ice_arr_elem_idx(41, "50G_LAUI2"),
85 	ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
86 	ice_arr_elem_idx(43, "50G_AUI2"),
87 	ice_arr_elem_idx(44, "50GBASE_CP"),
88 	ice_arr_elem_idx(45, "50GBASE_SR"),
89 	ice_arr_elem_idx(46, "50GBASE_FR"),
90 	ice_arr_elem_idx(47, "50GBASE_LR"),
91 	ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
92 	ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
93 	ice_arr_elem_idx(50, "50G_AUI1"),
94 	ice_arr_elem_idx(51, "100GBASE_CR4"),
95 	ice_arr_elem_idx(52, "100GBASE_SR4"),
96 	ice_arr_elem_idx(53, "100GBASE_LR4"),
97 	ice_arr_elem_idx(54, "100GBASE_KR4"),
98 	ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
99 	ice_arr_elem_idx(56, "100G_CAUI4"),
100 	ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
101 	ice_arr_elem_idx(58, "100G_AUI4"),
102 	ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
103 	ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
104 	ice_arr_elem_idx(61, "100GBASE_CP2"),
105 	ice_arr_elem_idx(62, "100GBASE_SR2"),
106 	ice_arr_elem_idx(63, "100GBASE_DR"),
107 };
108 
109 static const char * const ice_link_mode_str_high[] = {
110 	ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
111 	ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
112 	ice_arr_elem_idx(2, "100G_CAUI2"),
113 	ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
114 	ice_arr_elem_idx(4, "100G_AUI2"),
115 };
116 
117 /**
118  * ice_dump_phy_type - helper function to dump phy_type
119  * @hw: pointer to the HW structure
120  * @low: 64 bit value for phy_type_low
121  * @high: 64 bit value for phy_type_high
122  * @prefix: prefix string to differentiate multiple dumps
123  */
124 static void
125 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
126 {
127 	u32 i;
128 
129 	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
130 		  (unsigned long long)low);
131 
132 	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
133 		if (low & BIT_ULL(i))
134 			ice_debug(hw, ICE_DBG_PHY, "%s:   bit(%d): %s\n",
135 				  prefix, i, ice_link_mode_str_low[i]);
136 	}
137 
138 	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
139 		  (unsigned long long)high);
140 
141 	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
142 		if (high & BIT_ULL(i))
143 			ice_debug(hw, ICE_DBG_PHY, "%s:   bit(%d): %s\n",
144 				  prefix, i, ice_link_mode_str_high[i]);
145 	}
146 }
147 
148 /**
149  * ice_set_mac_type - Sets MAC type
150  * @hw: pointer to the HW structure
151  *
152  * This function sets the MAC type of the adapter based on the
153  * vendor ID and device ID stored in the HW structure.
154  */
155 enum ice_status ice_set_mac_type(struct ice_hw *hw)
156 {
157 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
158 
159 	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
160 		return ICE_ERR_DEVICE_NOT_SUPPORTED;
161 
162 	switch (hw->device_id) {
163 	case ICE_DEV_ID_E810C_BACKPLANE:
164 	case ICE_DEV_ID_E810C_QSFP:
165 	case ICE_DEV_ID_E810C_SFP:
166 	case ICE_DEV_ID_E810_XXV_BACKPLANE:
167 	case ICE_DEV_ID_E810_XXV_QSFP:
168 	case ICE_DEV_ID_E810_XXV_SFP:
169 		hw->mac_type = ICE_MAC_E810;
170 		break;
171 	case ICE_DEV_ID_E822C_10G_BASE_T:
172 	case ICE_DEV_ID_E822C_BACKPLANE:
173 	case ICE_DEV_ID_E822C_QSFP:
174 	case ICE_DEV_ID_E822C_SFP:
175 	case ICE_DEV_ID_E822C_SGMII:
176 	case ICE_DEV_ID_E822L_10G_BASE_T:
177 	case ICE_DEV_ID_E822L_BACKPLANE:
178 	case ICE_DEV_ID_E822L_SFP:
179 	case ICE_DEV_ID_E822L_SGMII:
180 	case ICE_DEV_ID_E823L_10G_BASE_T:
181 	case ICE_DEV_ID_E823L_1GBE:
182 	case ICE_DEV_ID_E823L_BACKPLANE:
183 	case ICE_DEV_ID_E823L_QSFP:
184 	case ICE_DEV_ID_E823L_SFP:
185 	case ICE_DEV_ID_E823C_10G_BASE_T:
186 	case ICE_DEV_ID_E823C_BACKPLANE:
187 	case ICE_DEV_ID_E823C_QSFP:
188 	case ICE_DEV_ID_E823C_SFP:
189 	case ICE_DEV_ID_E823C_SGMII:
190 		hw->mac_type = ICE_MAC_GENERIC;
191 		break;
192 	default:
193 		hw->mac_type = ICE_MAC_UNKNOWN;
194 		break;
195 	}
196 
197 	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
198 	return ICE_SUCCESS;
199 }
200 
201 /**
202  * ice_is_e810
203  * @hw: pointer to the hardware structure
204  *
205  * returns true if the device is E810 based, false if not.
206  */
207 bool ice_is_e810(struct ice_hw *hw)
208 {
209 	return hw->mac_type == ICE_MAC_E810;
210 }
211 
212 /**
213  * ice_is_e810t
214  * @hw: pointer to the hardware structure
215  *
216  * returns true if the device is E810T based, false if not.
217  */
218 bool ice_is_e810t(struct ice_hw *hw)
219 {
220 	switch (hw->device_id) {
221 	case ICE_DEV_ID_E810C_SFP:
222 		switch (hw->subsystem_device_id) {
223 		case ICE_SUBDEV_ID_E810T:
224 		case ICE_SUBDEV_ID_E810T2:
225 		case ICE_SUBDEV_ID_E810T3:
226 		case ICE_SUBDEV_ID_E810T4:
227 		case ICE_SUBDEV_ID_E810T5:
228 		case ICE_SUBDEV_ID_E810T7:
229 			return true;
230 		}
231 		break;
232 	case ICE_DEV_ID_E810C_QSFP:
233 		switch (hw->subsystem_device_id) {
234 		case ICE_SUBDEV_ID_E810T2:
235 		case ICE_SUBDEV_ID_E810T5:
236 		case ICE_SUBDEV_ID_E810T6:
237 			return true;
238 		}
239 		break;
240 	default:
241 		break;
242 	}
243 
244 	return false;
245 }
246 
247 /**
248  * ice_is_e823
249  * @hw: pointer to the hardware structure
250  *
251  * returns true if the device is E823-L or E823-C based, false if not.
252  */
253 bool ice_is_e823(struct ice_hw *hw)
254 {
255 	switch (hw->device_id) {
256 	case ICE_DEV_ID_E823L_BACKPLANE:
257 	case ICE_DEV_ID_E823L_SFP:
258 	case ICE_DEV_ID_E823L_10G_BASE_T:
259 	case ICE_DEV_ID_E823L_1GBE:
260 	case ICE_DEV_ID_E823L_QSFP:
261 	case ICE_DEV_ID_E823C_BACKPLANE:
262 	case ICE_DEV_ID_E823C_QSFP:
263 	case ICE_DEV_ID_E823C_SFP:
264 	case ICE_DEV_ID_E823C_10G_BASE_T:
265 	case ICE_DEV_ID_E823C_SGMII:
266 		return true;
267 	default:
268 		return false;
269 	}
270 }
271 
272 /**
273  * ice_clear_pf_cfg - Clear PF configuration
274  * @hw: pointer to the hardware structure
275  *
276  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
277  * configuration, flow director filters, etc.).
278  */
279 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
280 {
281 	struct ice_aq_desc desc;
282 
283 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
284 
285 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
286 }
287 
288 /**
289  * ice_aq_manage_mac_read - manage MAC address read command
290  * @hw: pointer to the HW struct
291  * @buf: a virtual buffer to hold the manage MAC read response
292  * @buf_size: Size of the virtual buffer
293  * @cd: pointer to command details structure or NULL
294  *
295  * This function is used to return per PF station MAC address (0x0107).
296  * NOTE: Upon successful completion of this command, MAC address information
297  * is returned in user specified buffer. Please interpret user specified
298  * buffer as "manage_mac_read" response.
299  * Response such as various MAC addresses are stored in HW struct (port.mac)
300  * ice_discover_dev_caps is expected to be called before this function is
301  * called.
302  */
303 enum ice_status
304 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
305 		       struct ice_sq_cd *cd)
306 {
307 	struct ice_aqc_manage_mac_read_resp *resp;
308 	struct ice_aqc_manage_mac_read *cmd;
309 	struct ice_aq_desc desc;
310 	enum ice_status status;
311 	u16 flags;
312 	u8 i;
313 
314 	cmd = &desc.params.mac_read;
315 
316 	if (buf_size < sizeof(*resp))
317 		return ICE_ERR_BUF_TOO_SHORT;
318 
319 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
320 
321 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
322 	if (status)
323 		return status;
324 
325 	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
326 	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
327 
328 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
329 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
330 		return ICE_ERR_CFG;
331 	}
332 
333 	/* A single port can report up to two (LAN and WoL) addresses */
334 	for (i = 0; i < cmd->num_addr; i++)
335 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
336 			ice_memcpy(hw->port_info->mac.lan_addr,
337 				   resp[i].mac_addr, ETH_ALEN,
338 				   ICE_NONDMA_TO_NONDMA);
339 			ice_memcpy(hw->port_info->mac.perm_addr,
340 				   resp[i].mac_addr,
341 				   ETH_ALEN, ICE_NONDMA_TO_NONDMA);
342 			break;
343 		}
344 	return ICE_SUCCESS;
345 }
346 
347 /**
348  * ice_aq_get_phy_caps - returns PHY capabilities
349  * @pi: port information structure
350  * @qual_mods: report qualified modules
351  * @report_mode: report mode capabilities
352  * @pcaps: structure for PHY capabilities to be filled
353  * @cd: pointer to command details structure or NULL
354  *
355  * Returns the various PHY capabilities supported on the Port (0x0600)
356  */
357 enum ice_status
358 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
359 		    struct ice_aqc_get_phy_caps_data *pcaps,
360 		    struct ice_sq_cd *cd)
361 {
362 	struct ice_aqc_get_phy_caps *cmd;
363 	u16 pcaps_size = sizeof(*pcaps);
364 	struct ice_aq_desc desc;
365 	enum ice_status status;
366 	const char *prefix;
367 	struct ice_hw *hw;
368 
369 	cmd = &desc.params.get_phy;
370 
371 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
372 		return ICE_ERR_PARAM;
373 	hw = pi->hw;
374 
375 	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
376 	    !ice_fw_supports_report_dflt_cfg(hw))
377 		return ICE_ERR_PARAM;
378 
379 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
380 
381 	if (qual_mods)
382 		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
383 
384 	cmd->param0 |= CPU_TO_LE16(report_mode);
385 
386 	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
387 
388 	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
389 
390 	switch (report_mode) {
391 	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
392 		prefix = "phy_caps_media";
393 		break;
394 	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
395 		prefix = "phy_caps_no_media";
396 		break;
397 	case ICE_AQC_REPORT_ACTIVE_CFG:
398 		prefix = "phy_caps_active";
399 		break;
400 	case ICE_AQC_REPORT_DFLT_CFG:
401 		prefix = "phy_caps_default";
402 		break;
403 	default:
404 		prefix = "phy_caps_invalid";
405 	}
406 
407 	ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
408 			  LE64_TO_CPU(pcaps->phy_type_high), prefix);
409 
410 	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
411 		  prefix, report_mode);
412 	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
413 	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
414 		  pcaps->low_power_ctrl_an);
415 	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
416 		  pcaps->eee_cap);
417 	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
418 		  pcaps->eeer_value);
419 	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
420 		  pcaps->link_fec_options);
421 	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
422 		  prefix, pcaps->module_compliance_enforcement);
423 	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
424 		  prefix, pcaps->extended_compliance_code);
425 	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
426 		  pcaps->module_type[0]);
427 	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
428 		  pcaps->module_type[1]);
429 	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
430 		  pcaps->module_type[2]);
431 
432 	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
433 		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
434 		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
435 		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
436 			   sizeof(pi->phy.link_info.module_type),
437 			   ICE_NONDMA_TO_NONDMA);
438 	}
439 
440 	return status;
441 }
442 
443 /**
444  * ice_aq_get_netlist_node
445  * @hw: pointer to the hw struct
446  * @cmd: get_link_topo AQ structure
447  * @node_part_number: output node part number if node found
448  * @node_handle: output node handle parameter if node found
449  */
450 enum ice_status
451 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
452 			u8 *node_part_number, u16 *node_handle)
453 {
454 	struct ice_aq_desc desc;
455 
456 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
457 	desc.params.get_link_topo = *cmd;
458 
459 	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
460 		return ICE_ERR_NOT_SUPPORTED;
461 
462 	if (node_handle)
463 		*node_handle =
464 			LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
465 	if (node_part_number)
466 		*node_part_number = desc.params.get_link_topo.node_part_num;
467 
468 	return ICE_SUCCESS;
469 }
470 
471 #define MAX_NETLIST_SIZE 10
472 /**
473  * ice_find_netlist_node
474  * @hw: pointer to the hw struct
475  * @node_type_ctx: type of netlist node to look for
476  * @node_part_number: node part number to look for
477  * @node_handle: output parameter if node found - optional
478  *
479  * Find and return the node handle for a given node type and part number in the
480  * netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
481  * otherwise. If node_handle provided, it would be set to found node handle.
482  */
483 enum ice_status
484 ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
485 		      u16 *node_handle)
486 {
487 	struct ice_aqc_get_link_topo cmd;
488 	u8 rec_node_part_number;
489 	u16 rec_node_handle;
490 	u8 idx;
491 
492 	for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
493 		enum ice_status status;
494 
495 		memset(&cmd, 0, sizeof(cmd));
496 
497 		cmd.addr.topo_params.node_type_ctx =
498 			(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
499 		cmd.addr.topo_params.index = idx;
500 
501 		status = ice_aq_get_netlist_node(hw, &cmd,
502 						 &rec_node_part_number,
503 						 &rec_node_handle);
504 		if (status)
505 			return status;
506 
507 		if (rec_node_part_number == node_part_number) {
508 			if (node_handle)
509 				*node_handle = rec_node_handle;
510 			return ICE_SUCCESS;
511 		}
512 	}
513 
514 	return ICE_ERR_DOES_NOT_EXIST;
515 }
516 
517 /**
518  * ice_is_media_cage_present
519  * @pi: port information structure
520  *
521  * Returns true if media cage is present, else false. If no cage, then
522  * media type is backplane or BASE-T.
523  */
524 static bool ice_is_media_cage_present(struct ice_port_info *pi)
525 {
526 	struct ice_aqc_get_link_topo *cmd;
527 	struct ice_aq_desc desc;
528 
529 	cmd = &desc.params.get_link_topo;
530 
531 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
532 
533 	cmd->addr.topo_params.node_type_ctx =
534 		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
535 		 ICE_AQC_LINK_TOPO_NODE_CTX_S);
536 
537 	/* set node type */
538 	cmd->addr.topo_params.node_type_ctx |=
539 		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
540 		 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);
541 
542 	/* Node type cage can be used to determine if cage is present. If AQC
543 	 * returns error (ENOENT), then no cage present. If no cage present then
544 	 * connection type is backplane or BASE-T.
545 	 */
546 	return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL);
547 }
548 
549 /**
550  * ice_get_media_type - Gets media type
551  * @pi: port information structure
552  */
553 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
554 {
555 	struct ice_link_status *hw_link_info;
556 
557 	if (!pi)
558 		return ICE_MEDIA_UNKNOWN;
559 
560 	hw_link_info = &pi->phy.link_info;
561 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
562 		/* If more than one media type is selected, report unknown */
563 		return ICE_MEDIA_UNKNOWN;
564 
565 	if (hw_link_info->phy_type_low) {
566 		/* 1G SGMII is a special case where some DA cable PHYs
567 		 * may show this as an option when it really shouldn't
568 		 * be since SGMII is meant to be between a MAC and a PHY
569 		 * in a backplane. Try to detect this case and handle it
570 		 */
571 		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
572 		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
573 		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
574 		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
575 		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
576 			return ICE_MEDIA_DA;
577 
578 		switch (hw_link_info->phy_type_low) {
579 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
580 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
581 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
582 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
583 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
584 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
585 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
586 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
587 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
588 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
589 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
590 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
591 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
592 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
593 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
594 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
595 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
596 			return ICE_MEDIA_FIBER;
597 		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
598 		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
599 		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
600 		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
601 		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
602 		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
603 		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
604 		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
605 			return ICE_MEDIA_FIBER;
606 		case ICE_PHY_TYPE_LOW_100BASE_TX:
607 		case ICE_PHY_TYPE_LOW_1000BASE_T:
608 		case ICE_PHY_TYPE_LOW_2500BASE_T:
609 		case ICE_PHY_TYPE_LOW_5GBASE_T:
610 		case ICE_PHY_TYPE_LOW_10GBASE_T:
611 		case ICE_PHY_TYPE_LOW_25GBASE_T:
612 			return ICE_MEDIA_BASET;
613 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
614 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
615 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
616 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
617 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
618 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
619 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
620 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
621 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
622 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
623 			return ICE_MEDIA_DA;
624 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
625 		case ICE_PHY_TYPE_LOW_40G_XLAUI:
626 		case ICE_PHY_TYPE_LOW_50G_LAUI2:
627 		case ICE_PHY_TYPE_LOW_50G_AUI2:
628 		case ICE_PHY_TYPE_LOW_50G_AUI1:
629 		case ICE_PHY_TYPE_LOW_100G_AUI4:
630 		case ICE_PHY_TYPE_LOW_100G_CAUI4:
631 			if (ice_is_media_cage_present(pi))
632 				return ICE_MEDIA_AUI;
633 			/* fall-through */
634 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
635 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
636 		case ICE_PHY_TYPE_LOW_2500BASE_X:
637 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
638 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
639 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
640 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
641 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
642 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
643 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
644 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
645 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
646 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
647 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
648 			return ICE_MEDIA_BACKPLANE;
649 		}
650 	} else {
651 		switch (hw_link_info->phy_type_high) {
652 		case ICE_PHY_TYPE_HIGH_100G_AUI2:
653 		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
654 			if (ice_is_media_cage_present(pi))
655 				return ICE_MEDIA_AUI;
656 			/* fall-through */
657 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
658 			return ICE_MEDIA_BACKPLANE;
659 		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
660 		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
661 			return ICE_MEDIA_FIBER;
662 		}
663 	}
664 	return ICE_MEDIA_UNKNOWN;
665 }
666 
667 #define ice_get_link_status_datalen(hw)	ICE_GET_LINK_STATUS_DATALEN_V1
668 
669 /**
670  * ice_aq_get_link_info
671  * @pi: port information structure
672  * @ena_lse: enable/disable LinkStatusEvent reporting
673  * @link: pointer to link status structure - optional
674  * @cd: pointer to command details structure or NULL
675  *
676  * Get Link Status (0x607). Returns the link status of the adapter.
677  */
678 enum ice_status
679 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
680 		     struct ice_link_status *link, struct ice_sq_cd *cd)
681 {
682 	struct ice_aqc_get_link_status_data link_data = { 0 };
683 	struct ice_aqc_get_link_status *resp;
684 	struct ice_link_status *li_old, *li;
685 	enum ice_media_type *hw_media_type;
686 	struct ice_fc_info *hw_fc_info;
687 	bool tx_pause, rx_pause;
688 	struct ice_aq_desc desc;
689 	enum ice_status status;
690 	struct ice_hw *hw;
691 	u16 cmd_flags;
692 
693 	if (!pi)
694 		return ICE_ERR_PARAM;
695 	hw = pi->hw;
696 
697 	li_old = &pi->phy.link_info_old;
698 	hw_media_type = &pi->phy.media_type;
699 	li = &pi->phy.link_info;
700 	hw_fc_info = &pi->fc;
701 
702 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
703 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
704 	resp = &desc.params.get_link_status;
705 	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
706 	resp->lport_num = pi->lport;
707 
708 	status = ice_aq_send_cmd(hw, &desc, &link_data,
709 				 ice_get_link_status_datalen(hw), cd);
710 	if (status != ICE_SUCCESS)
711 		return status;
712 
713 	/* save off old link status information */
714 	*li_old = *li;
715 
716 	/* update current link status information */
717 	li->link_speed = LE16_TO_CPU(link_data.link_speed);
718 	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
719 	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
720 	*hw_media_type = ice_get_media_type(pi);
721 	li->link_info = link_data.link_info;
722 	li->link_cfg_err = link_data.link_cfg_err;
723 	li->an_info = link_data.an_info;
724 	li->ext_info = link_data.ext_info;
725 	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
726 	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
727 	li->topo_media_conflict = link_data.topo_media_conflict;
728 	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
729 				      ICE_AQ_CFG_PACING_TYPE_M);
730 
731 	/* update fc info */
732 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
733 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
734 	if (tx_pause && rx_pause)
735 		hw_fc_info->current_mode = ICE_FC_FULL;
736 	else if (tx_pause)
737 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
738 	else if (rx_pause)
739 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
740 	else
741 		hw_fc_info->current_mode = ICE_FC_NONE;
742 
743 	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
744 
745 	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
746 	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
747 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
748 		  (unsigned long long)li->phy_type_low);
749 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
750 		  (unsigned long long)li->phy_type_high);
751 	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
752 	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
753 	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
754 	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
755 	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
756 	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
757 	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
758 	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
759 		  li->max_frame_size);
760 	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);
761 
762 	/* save link status information */
763 	if (link)
764 		*link = *li;
765 
766 	/* flag cleared so calling functions don't call AQ again */
767 	pi->phy.get_link_info = false;
768 
769 	return ICE_SUCCESS;
770 }
771 
772 /**
773  * ice_fill_tx_timer_and_fc_thresh
774  * @hw: pointer to the HW struct
775  * @cmd: pointer to MAC cfg structure
776  *
777  * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
778  * descriptor
779  */
780 static void
781 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
782 				struct ice_aqc_set_mac_cfg *cmd)
783 {
784 	u16 fc_thres_val, tx_timer_val;
785 	u32 val;
786 
787 	/* We read back the transmit timer and fc threshold value of
788 	 * LFC. Thus, we will use index =
789 	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
790 	 *
791 	 * Also, because we are operating on transmit timer and fc
792 	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
793 	 */
794 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
795 
796 	/* Retrieve the transmit timer */
797 	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
798 	tx_timer_val = val &
799 		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
800 	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
801 
802 	/* Retrieve the fc threshold */
803 	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
804 	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
805 
806 	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
807 }
808 
809 /**
810  * ice_aq_set_mac_cfg
811  * @hw: pointer to the HW struct
812  * @max_frame_size: Maximum Frame Size to be supported
813  * @auto_drop: Tell HW to drop packets if TC queue is blocked
814  * @cd: pointer to command details structure or NULL
815  *
816  * Set MAC configuration (0x0603)
817  */
818 enum ice_status
819 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
820 		   struct ice_sq_cd *cd)
821 {
822 	struct ice_aqc_set_mac_cfg *cmd;
823 	struct ice_aq_desc desc;
824 
825 	cmd = &desc.params.set_mac_cfg;
826 
827 	if (max_frame_size == 0)
828 		return ICE_ERR_PARAM;
829 
830 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
831 
832 	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
833 
834 	if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
835 		cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
836 	ice_fill_tx_timer_and_fc_thresh(hw, cmd);
837 
838 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
839 }
840 
841 /**
842  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
843  * @hw: pointer to the HW struct
844  */
845 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
846 {
847 	struct ice_switch_info *sw;
848 	enum ice_status status;
849 
850 	hw->switch_info = (struct ice_switch_info *)
851 			  ice_malloc(hw, sizeof(*hw->switch_info));
852 
853 	sw = hw->switch_info;
854 
855 	if (!sw)
856 		return ICE_ERR_NO_MEMORY;
857 
858 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
859 	sw->prof_res_bm_init = 0;
860 
861 	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
862 	if (status) {
863 		ice_free(hw, hw->switch_info);
864 		return status;
865 	}
866 	return ICE_SUCCESS;
867 }
868 
869 /**
870  * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
871  * @hw: pointer to the HW struct
872  * @sw: pointer to switch info struct for which function clears filters
873  */
874 static void
875 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
876 {
877 	struct ice_vsi_list_map_info *v_pos_map;
878 	struct ice_vsi_list_map_info *v_tmp_map;
879 	struct ice_sw_recipe *recps;
880 	u8 i;
881 
882 	if (!sw)
883 		return;
884 
885 	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
886 				 ice_vsi_list_map_info, list_entry) {
887 		LIST_DEL(&v_pos_map->list_entry);
888 		ice_free(hw, v_pos_map);
889 	}
890 	recps = sw->recp_list;
891 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
892 		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
893 
894 		recps[i].root_rid = i;
895 		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
896 					 &recps[i].rg_list, ice_recp_grp_entry,
897 					 l_entry) {
898 			LIST_DEL(&rg_entry->l_entry);
899 			ice_free(hw, rg_entry);
900 		}
901 
902 		if (recps[i].adv_rule) {
903 			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
904 			struct ice_adv_fltr_mgmt_list_entry *lst_itr;
905 
906 			ice_destroy_lock(&recps[i].filt_rule_lock);
907 			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
908 						 &recps[i].filt_rules,
909 						 ice_adv_fltr_mgmt_list_entry,
910 						 list_entry) {
911 				LIST_DEL(&lst_itr->list_entry);
912 				ice_free(hw, lst_itr->lkups);
913 				ice_free(hw, lst_itr);
914 			}
915 		} else {
916 			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
917 
918 			ice_destroy_lock(&recps[i].filt_rule_lock);
919 			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
920 						 &recps[i].filt_rules,
921 						 ice_fltr_mgmt_list_entry,
922 						 list_entry) {
923 				LIST_DEL(&lst_itr->list_entry);
924 				ice_free(hw, lst_itr);
925 			}
926 		}
927 		if (recps[i].root_buf)
928 			ice_free(hw, recps[i].root_buf);
929 	}
930 	ice_rm_sw_replay_rule_info(hw, sw);
931 	ice_free(hw, sw->recp_list);
932 	ice_free(hw, sw);
933 }
934 
935 /**
936  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
937  * @hw: pointer to the HW struct
938  */
939 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
940 {
941 	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
942 }
943 
944 /**
945  * ice_get_itr_intrl_gran
946  * @hw: pointer to the HW struct
947  *
948  * Determines the ITR/INTRL granularities based on the maximum aggregate
949  * bandwidth according to the device's configuration during power-on.
950  */
951 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
952 {
953 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
954 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
955 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
956 
957 	switch (max_agg_bw) {
958 	case ICE_MAX_AGG_BW_200G:
959 	case ICE_MAX_AGG_BW_100G:
960 	case ICE_MAX_AGG_BW_50G:
961 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
962 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
963 		break;
964 	case ICE_MAX_AGG_BW_25G:
965 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
966 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
967 		break;
968 	}
969 }
970 
971 /**
972  * ice_print_rollback_msg - print FW rollback message
973  * @hw: pointer to the hardware structure
974  */
975 void ice_print_rollback_msg(struct ice_hw *hw)
976 {
977 	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
978 	struct ice_orom_info *orom;
979 	struct ice_nvm_info *nvm;
980 
981 	orom = &hw->flash.orom;
982 	nvm = &hw->flash.nvm;
983 
984 	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
985 		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
986 		 orom->build, orom->patch);
987 	ice_warn(hw,
988 		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
989 		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
990 }
991 
992 /**
993  * ice_set_umac_shared
994  * @hw: pointer to the hw struct
995  *
996  * Set boolean flag to allow unicast MAC sharing
997  */
998 void ice_set_umac_shared(struct ice_hw *hw)
999 {
1000 	hw->umac_shared = true;
1001 }
1002 
1003 /**
1004  * ice_init_hw - main hardware initialization routine
1005  * @hw: pointer to the hardware structure
1006  */
1007 enum ice_status ice_init_hw(struct ice_hw *hw)
1008 {
1009 	struct ice_aqc_get_phy_caps_data *pcaps;
1010 	enum ice_status status;
1011 	u16 mac_buf_len;
1012 	void *mac_buf;
1013 
1014 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1015 
1016 	/* Set MAC type based on DeviceID */
1017 	status = ice_set_mac_type(hw);
1018 	if (status)
1019 		return status;
1020 
1021 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
1022 			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
1023 		PF_FUNC_RID_FUNCTION_NUMBER_S;
1024 
1025 	status = ice_reset(hw, ICE_RESET_PFR);
1026 	if (status)
1027 		return status;
1028 	ice_get_itr_intrl_gran(hw);
1029 
1030 	status = ice_create_all_ctrlq(hw);
1031 	if (status)
1032 		goto err_unroll_cqinit;
1033 
1034 	ice_fwlog_set_support_ena(hw);
1035 	status = ice_fwlog_set(hw, &hw->fwlog_cfg);
1036 	if (status) {
1037 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
1038 			  status);
1039 	} else {
1040 		if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
1041 			status = ice_fwlog_register(hw);
1042 			if (status)
1043 				ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
1044 					  status);
1045 		} else {
1046 			status = ice_fwlog_unregister(hw);
1047 			if (status)
1048 				ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
1049 					  status);
1050 		}
1051 	}
1052 
1053 	status = ice_init_nvm(hw);
1054 	if (status)
1055 		goto err_unroll_cqinit;
1056 
1057 	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
1058 		ice_print_rollback_msg(hw);
1059 
1060 	status = ice_clear_pf_cfg(hw);
1061 	if (status)
1062 		goto err_unroll_cqinit;
1063 
1064 	ice_clear_pxe_mode(hw);
1065 
1066 	status = ice_get_caps(hw);
1067 	if (status)
1068 		goto err_unroll_cqinit;
1069 
1070 	hw->port_info = (struct ice_port_info *)
1071 			ice_malloc(hw, sizeof(*hw->port_info));
1072 	if (!hw->port_info) {
1073 		status = ICE_ERR_NO_MEMORY;
1074 		goto err_unroll_cqinit;
1075 	}
1076 
1077 	/* set the back pointer to HW */
1078 	hw->port_info->hw = hw;
1079 
1080 	/* Initialize port_info struct with switch configuration data */
1081 	status = ice_get_initial_sw_cfg(hw);
1082 	if (status)
1083 		goto err_unroll_alloc;
1084 
1085 	hw->evb_veb = true;
1086 	/* Query the allocated resources for Tx scheduler */
1087 	status = ice_sched_query_res_alloc(hw);
1088 	if (status) {
1089 		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
1090 		goto err_unroll_alloc;
1091 	}
1092 	ice_sched_get_psm_clk_freq(hw);
1093 
1094 	/* Initialize port_info struct with scheduler data */
1095 	status = ice_sched_init_port(hw->port_info);
1096 	if (status)
1097 		goto err_unroll_sched;
1098 	pcaps = (struct ice_aqc_get_phy_caps_data *)
1099 		ice_malloc(hw, sizeof(*pcaps));
1100 	if (!pcaps) {
1101 		status = ICE_ERR_NO_MEMORY;
1102 		goto err_unroll_sched;
1103 	}
1104 
1105 	/* Initialize port_info struct with PHY capabilities */
1106 	status = ice_aq_get_phy_caps(hw->port_info, false,
1107 				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
1108 	ice_free(hw, pcaps);
1109 	if (status)
1110 		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
1111 			 status);
1112 
1113 	/* Initialize port_info struct with link information */
1114 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1115 	if (status)
1116 		goto err_unroll_sched;
1117 	/* need a valid SW entry point to build a Tx tree */
1118 	if (!hw->sw_entry_point_layer) {
1119 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1120 		status = ICE_ERR_CFG;
1121 		goto err_unroll_sched;
1122 	}
1123 	INIT_LIST_HEAD(&hw->agg_list);
1124 	/* Initialize max burst size */
1125 	if (!hw->max_burst_size)
1126 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1127 	status = ice_init_fltr_mgmt_struct(hw);
1128 	if (status)
1129 		goto err_unroll_sched;
1130 
1131 	/* Get MAC information */
1132 
1133 	/* A single port can report up to two (LAN and WoL) addresses */
1134 	mac_buf = ice_calloc(hw, 2,
1135 			     sizeof(struct ice_aqc_manage_mac_read_resp));
1136 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1137 
1138 	if (!mac_buf) {
1139 		status = ICE_ERR_NO_MEMORY;
1140 		goto err_unroll_fltr_mgmt_struct;
1141 	}
1142 
1143 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1144 	ice_free(hw, mac_buf);
1145 
1146 	if (status)
1147 		goto err_unroll_fltr_mgmt_struct;
1148 
1149 	/* enable jumbo frame support at MAC level */
1150 	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
1151 				    NULL);
1152 	if (status)
1153 		goto err_unroll_fltr_mgmt_struct;
1154 
1155 	status = ice_init_hw_tbls(hw);
1156 	if (status)
1157 		goto err_unroll_fltr_mgmt_struct;
1158 	ice_init_lock(&hw->tnl_lock);
1159 
1160 	return ICE_SUCCESS;
1161 
1162 err_unroll_fltr_mgmt_struct:
1163 	ice_cleanup_fltr_mgmt_struct(hw);
1164 err_unroll_sched:
1165 	ice_sched_cleanup_all(hw);
1166 err_unroll_alloc:
1167 	ice_free(hw, hw->port_info);
1168 	hw->port_info = NULL;
1169 err_unroll_cqinit:
1170 	ice_destroy_all_ctrlq(hw);
1171 	return status;
1172 }
1173 
1174 /**
1175  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1176  * @hw: pointer to the hardware structure
1177  *
1178  * This should be called only during nominal operation, not as a result of
1179  * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1180  * applicable initializations if it fails for any reason.
1181  */
1182 void ice_deinit_hw(struct ice_hw *hw)
1183 {
1184 	ice_cleanup_fltr_mgmt_struct(hw);
1185 
1186 	ice_sched_cleanup_all(hw);
1187 	ice_sched_clear_agg(hw);
1188 	ice_free_seg(hw);
1189 	ice_free_hw_tbls(hw);
1190 	ice_destroy_lock(&hw->tnl_lock);
1191 
1192 	if (hw->port_info) {
1193 		ice_free(hw, hw->port_info);
1194 		hw->port_info = NULL;
1195 	}
1196 
1197 	ice_destroy_all_ctrlq(hw);
1198 
1199 	/* Clear VSI contexts if not already cleared */
1200 	ice_clear_all_vsi_ctx(hw);
1201 }
1202 
1203 /**
1204  * ice_check_reset - Check to see if a global reset is complete
1205  * @hw: pointer to the hardware structure
1206  */
1207 enum ice_status ice_check_reset(struct ice_hw *hw)
1208 {
1209 	u32 cnt, reg = 0, grst_timeout, uld_mask;
1210 
1211 	/* Poll for Device Active state in case a recent CORER, GLOBR,
1212 	 * or EMPR has occurred. The grst delay value is in 100ms units.
1213 	 * Add 1sec for outstanding AQ commands that can take a long time.
1214 	 */
1215 	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1216 			GLGEN_RSTCTL_GRSTDEL_S) + 10;
1217 
1218 	for (cnt = 0; cnt < grst_timeout; cnt++) {
1219 		ice_msec_delay(100, true);
1220 		reg = rd32(hw, GLGEN_RSTAT);
1221 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1222 			break;
1223 	}
1224 
1225 	if (cnt == grst_timeout) {
1226 		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1227 		return ICE_ERR_RESET_FAILED;
1228 	}
1229 
1230 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
1231 				 GLNVM_ULD_PCIER_DONE_1_M |\
1232 				 GLNVM_ULD_CORER_DONE_M |\
1233 				 GLNVM_ULD_GLOBR_DONE_M |\
1234 				 GLNVM_ULD_POR_DONE_M |\
1235 				 GLNVM_ULD_POR_DONE_1_M |\
1236 				 GLNVM_ULD_PCIER_DONE_2_M)
1237 
1238 	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
1239 					  GLNVM_ULD_PE_DONE_M : 0);
1240 
1241 	/* Device is Active; check Global Reset processes are done */
1242 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1243 		reg = rd32(hw, GLNVM_ULD) & uld_mask;
1244 		if (reg == uld_mask) {
1245 			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1246 			break;
1247 		}
1248 		ice_msec_delay(10, true);
1249 	}
1250 
1251 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1252 		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1253 			  reg);
1254 		return ICE_ERR_RESET_FAILED;
1255 	}
1256 
1257 	return ICE_SUCCESS;
1258 }
1259 
1260 /**
1261  * ice_pf_reset - Reset the PF
1262  * @hw: pointer to the hardware structure
1263  *
1264  * If a global reset has been triggered, this function checks
1265  * for its completion and then issues the PF reset
1266  */
1267 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1268 {
1269 	u32 cnt, reg;
1270 
1271 	/* If at function entry a global reset was already in progress, i.e.
1272 	 * state is not 'device active' or any of the reset done bits are not
1273 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1274 	 * global reset is done.
1275 	 */
1276 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1277 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1278 		/* poll on global reset currently in progress until done */
1279 		if (ice_check_reset(hw))
1280 			return ICE_ERR_RESET_FAILED;
1281 
1282 		return ICE_SUCCESS;
1283 	}
1284 
1285 	/* Reset the PF */
1286 	reg = rd32(hw, PFGEN_CTRL);
1287 
1288 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1289 
1290 	/* Wait for the PFR to complete. The wait time is the global config lock
1291 	 * timeout plus the PFR timeout which will account for a possible reset
1292 	 * that is occurring during a download package operation.
1293 	 */
1294 	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1295 	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
1296 		reg = rd32(hw, PFGEN_CTRL);
1297 		if (!(reg & PFGEN_CTRL_PFSWR_M))
1298 			break;
1299 
1300 		ice_msec_delay(1, true);
1301 	}
1302 
1303 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1304 		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1305 		return ICE_ERR_RESET_FAILED;
1306 	}
1307 
1308 	return ICE_SUCCESS;
1309 }
1310 
1311 /**
1312  * ice_reset - Perform different types of reset
1313  * @hw: pointer to the hardware structure
1314  * @req: reset request
1315  *
1316  * This function triggers a reset as specified by the req parameter.
1317  *
1318  * Note:
1319  * If anything other than a PF reset is triggered, PXE mode is restored.
1320  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1321  * interface has been restored in the rebuild flow.
1322  */
1323 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1324 {
1325 	u32 val = 0;
1326 
1327 	switch (req) {
1328 	case ICE_RESET_PFR:
1329 		return ice_pf_reset(hw);
1330 	case ICE_RESET_CORER:
1331 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1332 		val = GLGEN_RTRIG_CORER_M;
1333 		break;
1334 	case ICE_RESET_GLOBR:
1335 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1336 		val = GLGEN_RTRIG_GLOBR_M;
1337 		break;
1338 	default:
1339 		return ICE_ERR_PARAM;
1340 	}
1341 
1342 	val |= rd32(hw, GLGEN_RTRIG);
1343 	wr32(hw, GLGEN_RTRIG, val);
1344 	ice_flush(hw);
1345 
1346 	/* wait for the FW to be ready */
1347 	return ice_check_reset(hw);
1348 }
1349 
1350 /**
1351  * ice_copy_rxq_ctx_to_hw
1352  * @hw: pointer to the hardware structure
1353  * @ice_rxq_ctx: pointer to the rxq context
1354  * @rxq_index: the index of the Rx queue
1355  *
1356  * Copies rxq context from dense structure to HW register space
1357  */
1358 static enum ice_status
1359 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1360 {
1361 	u8 i;
1362 
1363 	if (!ice_rxq_ctx)
1364 		return ICE_ERR_BAD_PTR;
1365 
1366 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1367 		return ICE_ERR_PARAM;
1368 
1369 	/* Copy each dword separately to HW */
1370 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1371 		wr32(hw, QRX_CONTEXT(i, rxq_index),
1372 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1373 
1374 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1375 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1376 	}
1377 
1378 	return ICE_SUCCESS;
1379 }
1380 
1381 /**
1382  * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
1383  * @hw: pointer to the hardware structure
1384  * @ice_rxq_ctx: pointer to the rxq context
1385  * @rxq_index: the index of the Rx queue
1386  *
1387  * Copies rxq context from HW register space to dense structure
1388  */
1389 static enum ice_status
1390 ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1391 {
1392 	u8 i;
1393 
1394 	if (!ice_rxq_ctx)
1395 		return ICE_ERR_BAD_PTR;
1396 
1397 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1398 		return ICE_ERR_PARAM;
1399 
1400 	/* Copy each dword separately from HW */
1401 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1402 		u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));
1403 
1404 		*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
1405 
1406 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
1407 	}
1408 
1409 	return ICE_SUCCESS;
1410 }
1411 
1412 /* LAN Rx Queue Context */
1413 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1414 	/* Field		Width	LSB */
1415 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1416 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1417 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1418 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1419 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1420 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1421 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1422 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1423 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1424 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1425 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1426 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1427 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1428 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1429 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1430 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1431 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1432 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1433 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1434 	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1435 	{ 0 }
1436 };
1437 
1438 /**
1439  * ice_write_rxq_ctx
1440  * @hw: pointer to the hardware structure
1441  * @rlan_ctx: pointer to the rxq context
1442  * @rxq_index: the index of the Rx queue
1443  *
1444  * Converts rxq context from sparse to dense structure and then writes
1445  * it to HW register space and enables the hardware to prefetch descriptors
1446  * instead of only fetching them on demand
1447  */
1448 enum ice_status
1449 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1450 		  u32 rxq_index)
1451 {
1452 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1453 
1454 	if (!rlan_ctx)
1455 		return ICE_ERR_BAD_PTR;
1456 
1457 	rlan_ctx->prefena = 1;
1458 
1459 	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1460 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1461 }
1462 
1463 /**
1464  * ice_read_rxq_ctx - Read rxq context from HW
1465  * @hw: pointer to the hardware structure
1466  * @rlan_ctx: pointer to the rxq context
1467  * @rxq_index: the index of the Rx queue
1468  *
1469  * Read rxq context from HW register space and then converts it from dense
1470  * structure to sparse
1471  */
1472 enum ice_status
1473 ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1474 		 u32 rxq_index)
1475 {
1476 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1477 	enum ice_status status;
1478 
1479 	if (!rlan_ctx)
1480 		return ICE_ERR_BAD_PTR;
1481 
1482 	status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
1483 	if (status)
1484 		return status;
1485 
1486 	return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
1487 }
1488 
1489 /**
1490  * ice_clear_rxq_ctx
1491  * @hw: pointer to the hardware structure
1492  * @rxq_index: the index of the Rx queue to clear
1493  *
1494  * Clears rxq context in HW register space
1495  */
1496 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1497 {
1498 	u8 i;
1499 
1500 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1501 		return ICE_ERR_PARAM;
1502 
1503 	/* Clear each dword register separately */
1504 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1505 		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1506 
1507 	return ICE_SUCCESS;
1508 }
1509 
1510 /* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
1511  * Bit[0-175] is valid
1512  */
1513 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1514 				    /* Field			Width	LSB */
1515 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1516 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1517 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1518 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1519 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1520 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1521 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1522 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1523 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1524 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1525 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1526 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1527 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1528 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1529 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1530 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1531 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1532 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1533 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1534 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1535 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1536 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1537 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1538 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1539 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1540 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1541 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1542 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1543 	{ 0 }
1544 };
1545 
1546 /**
1547  * ice_copy_tx_cmpltnq_ctx_to_hw
1548  * @hw: pointer to the hardware structure
1549  * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1550  * @tx_cmpltnq_index: the index of the completion queue
1551  *
1552  * Copies Tx completion queue context from dense structure to HW register space
1553  */
1554 static enum ice_status
1555 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1556 			      u32 tx_cmpltnq_index)
1557 {
1558 	u8 i;
1559 
1560 	if (!ice_tx_cmpltnq_ctx)
1561 		return ICE_ERR_BAD_PTR;
1562 
1563 	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1564 		return ICE_ERR_PARAM;
1565 
1566 	/* Copy each dword separately to HW */
1567 	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1568 		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1569 		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1570 
1571 		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1572 			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1573 	}
1574 
1575 	return ICE_SUCCESS;
1576 }
1577 
1578 /* LAN Tx Completion Queue Context */
1579 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1580 				       /* Field			Width   LSB */
1581 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
1582 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
1583 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
1584 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
1585 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
1586 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
1587 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
1588 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
1589 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
1590 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
1591 	{ 0 }
1592 };
1593 
1594 /**
1595  * ice_write_tx_cmpltnq_ctx
1596  * @hw: pointer to the hardware structure
1597  * @tx_cmpltnq_ctx: pointer to the completion queue context
1598  * @tx_cmpltnq_index: the index of the completion queue
1599  *
1600  * Converts completion queue context from sparse to dense structure and then
1601  * writes it to HW register space
1602  */
1603 enum ice_status
1604 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1605 			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1606 			 u32 tx_cmpltnq_index)
1607 {
1608 	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1609 
1610 	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1611 	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1612 }
1613 
1614 /**
1615  * ice_clear_tx_cmpltnq_ctx
1616  * @hw: pointer to the hardware structure
1617  * @tx_cmpltnq_index: the index of the completion queue to clear
1618  *
1619  * Clears Tx completion queue context in HW register space
1620  */
1621 enum ice_status
1622 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1623 {
1624 	u8 i;
1625 
1626 	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1627 		return ICE_ERR_PARAM;
1628 
1629 	/* Clear each dword register separately */
1630 	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1631 		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1632 
1633 	return ICE_SUCCESS;
1634 }
1635 
1636 /**
1637  * ice_copy_tx_drbell_q_ctx_to_hw
1638  * @hw: pointer to the hardware structure
1639  * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1640  * @tx_drbell_q_index: the index of the doorbell queue
1641  *
1642  * Copies doorbell queue context from dense structure to HW register space
1643  */
1644 static enum ice_status
1645 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1646 			       u32 tx_drbell_q_index)
1647 {
1648 	u8 i;
1649 
1650 	if (!ice_tx_drbell_q_ctx)
1651 		return ICE_ERR_BAD_PTR;
1652 
1653 	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1654 		return ICE_ERR_PARAM;
1655 
1656 	/* Copy each dword separately to HW */
1657 	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1658 		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1659 		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1660 
1661 		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1662 			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1663 	}
1664 
1665 	return ICE_SUCCESS;
1666 }
1667 
1668 /* LAN Tx Doorbell Queue Context info */
1669 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1670 					/* Field		Width   LSB */
1671 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
1672 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
1673 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
1674 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
1675 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
1676 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
1677 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
1678 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
1679 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
1680 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
1681 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
1682 	{ 0 }
1683 };
1684 
1685 /**
1686  * ice_write_tx_drbell_q_ctx
1687  * @hw: pointer to the hardware structure
1688  * @tx_drbell_q_ctx: pointer to the doorbell queue context
1689  * @tx_drbell_q_index: the index of the doorbell queue
1690  *
1691  * Converts doorbell queue context from sparse to dense structure and then
1692  * writes it to HW register space
1693  */
1694 enum ice_status
1695 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1696 			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1697 			  u32 tx_drbell_q_index)
1698 {
1699 	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1700 
1701 	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1702 		    ice_tx_drbell_q_ctx_info);
1703 	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1704 }
1705 
1706 /**
1707  * ice_clear_tx_drbell_q_ctx
1708  * @hw: pointer to the hardware structure
1709  * @tx_drbell_q_index: the index of the doorbell queue to clear
1710  *
1711  * Clears doorbell queue context in HW register space
1712  */
1713 enum ice_status
1714 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1715 {
1716 	u8 i;
1717 
1718 	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1719 		return ICE_ERR_PARAM;
1720 
1721 	/* Clear each dword register separately */
1722 	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1723 		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1724 
1725 	return ICE_SUCCESS;
1726 }
1727 
1728 /* FW Admin Queue command wrappers */
1729 
1730 /**
1731  * ice_should_retry_sq_send_cmd
1732  * @opcode: AQ opcode
1733  *
1734  * Decide if we should retry the send command routine for the ATQ, depending
1735  * on the opcode.
1736  */
1737 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1738 {
1739 	switch (opcode) {
1740 	case ice_aqc_opc_dnl_get_status:
1741 	case ice_aqc_opc_dnl_run:
1742 	case ice_aqc_opc_dnl_call:
1743 	case ice_aqc_opc_dnl_read_sto:
1744 	case ice_aqc_opc_dnl_write_sto:
1745 	case ice_aqc_opc_dnl_set_breakpoints:
1746 	case ice_aqc_opc_dnl_read_log:
1747 	case ice_aqc_opc_get_link_topo:
1748 	case ice_aqc_opc_done_alt_write:
1749 	case ice_aqc_opc_lldp_stop:
1750 	case ice_aqc_opc_lldp_start:
1751 	case ice_aqc_opc_lldp_filter_ctrl:
1752 		return true;
1753 	}
1754 
1755 	return false;
1756 }
1757 
1758 /**
1759  * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1760  * @hw: pointer to the HW struct
1761  * @cq: pointer to the specific Control queue
1762  * @desc: prefilled descriptor describing the command
1763  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1764  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1765  * @cd: pointer to command details structure
1766  *
1767  * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1768  * Queue if the EBUSY AQ error is returned.
1769  */
1770 static enum ice_status
1771 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1772 		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
1773 		      struct ice_sq_cd *cd)
1774 {
1775 	struct ice_aq_desc desc_cpy;
1776 	enum ice_status status;
1777 	bool is_cmd_for_retry;
1778 	u8 *buf_cpy = NULL;
1779 	u8 idx = 0;
1780 	u16 opcode;
1781 
1782 	opcode = LE16_TO_CPU(desc->opcode);
1783 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1784 	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1785 
1786 	if (is_cmd_for_retry) {
1787 		if (buf) {
1788 			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1789 			if (!buf_cpy)
1790 				return ICE_ERR_NO_MEMORY;
1791 		}
1792 
1793 		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1794 			   ICE_NONDMA_TO_NONDMA);
1795 	}
1796 
1797 	do {
1798 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1799 
1800 		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1801 		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1802 			break;
1803 
1804 		if (buf_cpy)
1805 			ice_memcpy(buf, buf_cpy, buf_size,
1806 				   ICE_NONDMA_TO_NONDMA);
1807 
1808 		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1809 			   ICE_NONDMA_TO_NONDMA);
1810 
1811 		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1812 
1813 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1814 
1815 	if (buf_cpy)
1816 		ice_free(hw, buf_cpy);
1817 
1818 	return status;
1819 }
1820 
1821 /**
1822  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1823  * @hw: pointer to the HW struct
1824  * @desc: descriptor describing the command
1825  * @buf: buffer to use for indirect commands (NULL for direct commands)
1826  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1827  * @cd: pointer to command details structure
1828  *
1829  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1830  */
1831 enum ice_status
1832 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1833 		u16 buf_size, struct ice_sq_cd *cd)
1834 {
1835 	return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1836 }
1837 
1838 /**
1839  * ice_aq_get_fw_ver
1840  * @hw: pointer to the HW struct
1841  * @cd: pointer to command details structure or NULL
1842  *
1843  * Get the firmware version (0x0001) from the admin queue commands
1844  */
1845 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1846 {
1847 	struct ice_aqc_get_ver *resp;
1848 	struct ice_aq_desc desc;
1849 	enum ice_status status;
1850 
1851 	resp = &desc.params.get_ver;
1852 
1853 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1854 
1855 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1856 
1857 	if (!status) {
1858 		hw->fw_branch = resp->fw_branch;
1859 		hw->fw_maj_ver = resp->fw_major;
1860 		hw->fw_min_ver = resp->fw_minor;
1861 		hw->fw_patch = resp->fw_patch;
1862 		hw->fw_build = LE32_TO_CPU(resp->fw_build);
1863 		hw->api_branch = resp->api_branch;
1864 		hw->api_maj_ver = resp->api_major;
1865 		hw->api_min_ver = resp->api_minor;
1866 		hw->api_patch = resp->api_patch;
1867 	}
1868 
1869 	return status;
1870 }
1871 
1872 /**
1873  * ice_aq_send_driver_ver
1874  * @hw: pointer to the HW struct
1875  * @dv: driver's major, minor version
1876  * @cd: pointer to command details structure or NULL
1877  *
1878  * Send the driver version (0x0002) to the firmware
1879  */
1880 enum ice_status
1881 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1882 		       struct ice_sq_cd *cd)
1883 {
1884 	struct ice_aqc_driver_ver *cmd;
1885 	struct ice_aq_desc desc;
1886 	u16 len;
1887 
1888 	cmd = &desc.params.driver_ver;
1889 
1890 	if (!dv)
1891 		return ICE_ERR_PARAM;
1892 
1893 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1894 
1895 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1896 	cmd->major_ver = dv->major_ver;
1897 	cmd->minor_ver = dv->minor_ver;
1898 	cmd->build_ver = dv->build_ver;
1899 	cmd->subbuild_ver = dv->subbuild_ver;
1900 
1901 	len = 0;
1902 	while (len < sizeof(dv->driver_string) &&
1903 	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1904 		len++;
1905 
1906 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1907 }
1908 
1909 /**
1910  * ice_aq_q_shutdown
1911  * @hw: pointer to the HW struct
1912  * @unloading: is the driver unloading itself
1913  *
1914  * Tell the Firmware that we're shutting down the AdminQ and whether
1915  * or not the driver is unloading as well (0x0003).
1916  */
1917 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1918 {
1919 	struct ice_aqc_q_shutdown *cmd;
1920 	struct ice_aq_desc desc;
1921 
1922 	cmd = &desc.params.q_shutdown;
1923 
1924 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1925 
1926 	if (unloading)
1927 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1928 
1929 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1930 }
1931 
1932 /**
1933  * ice_aq_req_res
1934  * @hw: pointer to the HW struct
1935  * @res: resource ID
1936  * @access: access type
1937  * @sdp_number: resource number
1938  * @timeout: the maximum time in ms that the driver may hold the resource
1939  * @cd: pointer to command details structure or NULL
1940  *
1941  * Requests common resource using the admin queue commands (0x0008).
1942  * When attempting to acquire the Global Config Lock, the driver can
1943  * learn of three states:
1944  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1945  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1946  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1947  *                          successfully downloaded the package; the driver does
1948  *                          not have to download the package and can continue
1949  *                          loading
1950  *
1951  * Note that if the caller is in an acquire lock, perform action, release lock
1952  * phase of operation, it is possible that the FW may detect a timeout and issue
1953  * a CORER. In this case, the driver will receive a CORER interrupt and will
1954  * have to determine its cause. The calling thread that is handling this flow
1955  * will likely get an error propagated back to it indicating the Download
1956  * Package, Update Package or the Release Resource AQ commands timed out.
1957  */
1958 static enum ice_status
1959 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1960 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1961 	       struct ice_sq_cd *cd)
1962 {
1963 	struct ice_aqc_req_res *cmd_resp;
1964 	struct ice_aq_desc desc;
1965 	enum ice_status status;
1966 
1967 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1968 
1969 	cmd_resp = &desc.params.res_owner;
1970 
1971 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1972 
1973 	cmd_resp->res_id = CPU_TO_LE16(res);
1974 	cmd_resp->access_type = CPU_TO_LE16(access);
1975 	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1976 	cmd_resp->timeout = CPU_TO_LE32(*timeout);
1977 	*timeout = 0;
1978 
1979 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1980 
1981 	/* The completion specifies the maximum time in ms that the driver
1982 	 * may hold the resource in the Timeout field.
1983 	 */
1984 
1985 	/* Global config lock response utilizes an additional status field.
1986 	 *
1987 	 * If the Global config lock resource is held by some other driver, the
1988 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1989 	 * and the timeout field indicates the maximum time the current owner
1990 	 * of the resource has to free it.
1991 	 */
1992 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1993 		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1994 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1995 			return ICE_SUCCESS;
1996 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1997 			   ICE_AQ_RES_GLBL_IN_PROG) {
1998 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1999 			return ICE_ERR_AQ_ERROR;
2000 		} else if (LE16_TO_CPU(cmd_resp->status) ==
2001 			   ICE_AQ_RES_GLBL_DONE) {
2002 			return ICE_ERR_AQ_NO_WORK;
2003 		}
2004 
2005 		/* invalid FW response, force a timeout immediately */
2006 		*timeout = 0;
2007 		return ICE_ERR_AQ_ERROR;
2008 	}
2009 
2010 	/* If the resource is held by some other driver, the command completes
2011 	 * with a busy return value and the timeout field indicates the maximum
2012 	 * time the current owner of the resource has to free it.
2013 	 */
2014 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
2015 		*timeout = LE32_TO_CPU(cmd_resp->timeout);
2016 
2017 	return status;
2018 }
2019 
2020 /**
2021  * ice_aq_release_res
2022  * @hw: pointer to the HW struct
2023  * @res: resource ID
2024  * @sdp_number: resource number
2025  * @cd: pointer to command details structure or NULL
2026  *
2027  * release common resource using the admin queue commands (0x0009)
2028  */
2029 static enum ice_status
2030 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
2031 		   struct ice_sq_cd *cd)
2032 {
2033 	struct ice_aqc_req_res *cmd;
2034 	struct ice_aq_desc desc;
2035 
2036 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2037 
2038 	cmd = &desc.params.res_owner;
2039 
2040 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
2041 
2042 	cmd->res_id = CPU_TO_LE16(res);
2043 	cmd->res_number = CPU_TO_LE32(sdp_number);
2044 
2045 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2046 }
2047 
2048 /**
2049  * ice_acquire_res
2050  * @hw: pointer to the HW structure
2051  * @res: resource ID
2052  * @access: access type (read or write)
2053  * @timeout: timeout in milliseconds
2054  *
2055  * This function will attempt to acquire the ownership of a resource.
2056  */
2057 enum ice_status
2058 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2059 		enum ice_aq_res_access_type access, u32 timeout)
2060 {
2061 #define ICE_RES_POLLING_DELAY_MS	10
2062 	u32 delay = ICE_RES_POLLING_DELAY_MS;
2063 	u32 time_left = timeout;
2064 	enum ice_status status;
2065 
2066 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2067 
2068 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2069 
2070 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2071 	 * previously acquired the resource and performed any necessary updates;
2072 	 * in this case the caller does not obtain the resource and has no
2073 	 * further work to do.
2074 	 */
2075 	if (status == ICE_ERR_AQ_NO_WORK)
2076 		goto ice_acquire_res_exit;
2077 
2078 	if (status)
2079 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2080 
2081 	/* If necessary, poll until the current lock owner timeouts */
2082 	timeout = time_left;
2083 	while (status && timeout && time_left) {
2084 		ice_msec_delay(delay, true);
2085 		timeout = (timeout > delay) ? timeout - delay : 0;
2086 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2087 
2088 		if (status == ICE_ERR_AQ_NO_WORK)
2089 			/* lock free, but no work to do */
2090 			break;
2091 
2092 		if (!status)
2093 			/* lock acquired */
2094 			break;
2095 	}
2096 	if (status && status != ICE_ERR_AQ_NO_WORK)
2097 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2098 
2099 ice_acquire_res_exit:
2100 	if (status == ICE_ERR_AQ_NO_WORK) {
2101 		if (access == ICE_RES_WRITE)
2102 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2103 		else
2104 			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2105 	}
2106 	return status;
2107 }
2108 
2109 /**
2110  * ice_release_res
2111  * @hw: pointer to the HW structure
2112  * @res: resource ID
2113  *
2114  * This function will release a resource using the proper Admin Command.
2115  */
2116 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2117 {
2118 	enum ice_status status;
2119 	u32 total_delay = 0;
2120 
2121 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2122 
2123 	status = ice_aq_release_res(hw, res, 0, NULL);
2124 
2125 	/* there are some rare cases when trying to release the resource
2126 	 * results in an admin queue timeout, so handle them correctly
2127 	 */
2128 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
2129 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
2130 		ice_msec_delay(1, true);
2131 		status = ice_aq_release_res(hw, res, 0, NULL);
2132 		total_delay++;
2133 	}
2134 }
2135 
2136 /**
2137  * ice_aq_alloc_free_res - command to allocate/free resources
2138  * @hw: pointer to the HW struct
2139  * @num_entries: number of resource entries in buffer
2140  * @buf: Indirect buffer to hold data parameters and response
2141  * @buf_size: size of buffer for indirect commands
2142  * @opc: pass in the command opcode
2143  * @cd: pointer to command details structure or NULL
2144  *
2145  * Helper function to allocate/free resources using the admin queue commands
2146  */
2147 enum ice_status
2148 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2149 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2150 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2151 {
2152 	struct ice_aqc_alloc_free_res_cmd *cmd;
2153 	struct ice_aq_desc desc;
2154 
2155 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2156 
2157 	cmd = &desc.params.sw_res_ctrl;
2158 
2159 	if (!buf)
2160 		return ICE_ERR_PARAM;
2161 
2162 	if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2163 		return ICE_ERR_PARAM;
2164 
2165 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2166 
2167 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2168 
2169 	cmd->num_entries = CPU_TO_LE16(num_entries);
2170 
2171 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2172 }
2173 
2174 /**
2175  * ice_alloc_hw_res - allocate resource
2176  * @hw: pointer to the HW struct
2177  * @type: type of resource
2178  * @num: number of resources to allocate
2179  * @btm: allocate from bottom
2180  * @res: pointer to array that will receive the resources
2181  */
2182 enum ice_status
2183 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2184 {
2185 	struct ice_aqc_alloc_free_res_elem *buf;
2186 	enum ice_status status;
2187 	u16 buf_len;
2188 
2189 	buf_len = ice_struct_size(buf, elem, num);
2190 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2191 	if (!buf)
2192 		return ICE_ERR_NO_MEMORY;
2193 
2194 	/* Prepare buffer to allocate resource. */
2195 	buf->num_elems = CPU_TO_LE16(num);
2196 	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2197 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2198 	if (btm)
2199 		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2200 
2201 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2202 				       ice_aqc_opc_alloc_res, NULL);
2203 	if (status)
2204 		goto ice_alloc_res_exit;
2205 
2206 	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2207 		   ICE_NONDMA_TO_NONDMA);
2208 
2209 ice_alloc_res_exit:
2210 	ice_free(hw, buf);
2211 	return status;
2212 }
2213 
2214 /**
2215  * ice_free_hw_res - free allocated HW resource
2216  * @hw: pointer to the HW struct
2217  * @type: type of resource to free
2218  * @num: number of resources
2219  * @res: pointer to array that contains the resources to free
2220  */
2221 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2222 {
2223 	struct ice_aqc_alloc_free_res_elem *buf;
2224 	enum ice_status status;
2225 	u16 buf_len;
2226 
2227 	buf_len = ice_struct_size(buf, elem, num);
2228 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2229 	if (!buf)
2230 		return ICE_ERR_NO_MEMORY;
2231 
2232 	/* Prepare buffer to free resource. */
2233 	buf->num_elems = CPU_TO_LE16(num);
2234 	buf->res_type = CPU_TO_LE16(type);
2235 	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2236 		   ICE_NONDMA_TO_NONDMA);
2237 
2238 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2239 				       ice_aqc_opc_free_res, NULL);
2240 	if (status)
2241 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2242 
2243 	ice_free(hw, buf);
2244 	return status;
2245 }
2246 
2247 /**
2248  * ice_get_num_per_func - determine number of resources per PF
2249  * @hw: pointer to the HW structure
2250  * @max: value to be evenly split between each PF
2251  *
2252  * Determine the number of valid functions by going through the bitmap returned
2253  * from parsing capabilities and use this to calculate the number of resources
2254  * per PF based on the max value passed in.
2255  */
2256 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2257 {
2258 	u8 funcs;
2259 
2260 #define ICE_CAPS_VALID_FUNCS_M	0xFF
2261 	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2262 			     ICE_CAPS_VALID_FUNCS_M);
2263 
2264 	if (!funcs)
2265 		return 0;
2266 
2267 	return max / funcs;
2268 }
2269 
2270 /**
2271  * ice_print_led_caps - print LED capabilities
2272  * @hw: pointer to the ice_hw instance
2273  * @caps: pointer to common caps instance
2274  * @prefix: string to prefix when printing
2275  * @dbg: set to indicate debug print
2276  */
2277 static void
2278 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2279 		   char const *prefix, bool dbg)
2280 {
2281 	u8 i;
2282 
2283 	if (dbg)
2284 		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
2285 			  caps->led_pin_num);
2286 	else
2287 		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
2288 			 caps->led_pin_num);
2289 
2290 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2291 		if (!caps->led[i])
2292 			continue;
2293 
2294 		if (dbg)
2295 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
2296 				  prefix, i, caps->led[i]);
2297 		else
2298 			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
2299 				 caps->led[i]);
2300 	}
2301 }
2302 
2303 /**
2304  * ice_print_sdp_caps - print SDP capabilities
2305  * @hw: pointer to the ice_hw instance
2306  * @caps: pointer to common caps instance
2307  * @prefix: string to prefix when printing
2308  * @dbg: set to indicate debug print
2309  */
2310 static void
2311 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2312 		   char const *prefix, bool dbg)
2313 {
2314 	u8 i;
2315 
2316 	if (dbg)
2317 		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
2318 			  caps->sdp_pin_num);
2319 	else
2320 		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
2321 			 caps->sdp_pin_num);
2322 
2323 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2324 		if (!caps->sdp[i])
2325 			continue;
2326 
2327 		if (dbg)
2328 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
2329 				  prefix, i, caps->sdp[i]);
2330 		else
2331 			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
2332 				 i, caps->sdp[i]);
2333 	}
2334 }
2335 
2336 /**
2337  * ice_parse_common_caps - parse common device/function capabilities
2338  * @hw: pointer to the HW struct
2339  * @caps: pointer to common capabilities structure
2340  * @elem: the capability element to parse
2341  * @prefix: message prefix for tracing capabilities
2342  *
2343  * Given a capability element, extract relevant details into the common
2344  * capability structure.
2345  *
2346  * Returns: true if the capability matches one of the common capability ids,
2347  * false otherwise.
2348  */
2349 static bool
2350 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2351 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
2352 {
2353 	u32 logical_id = LE32_TO_CPU(elem->logical_id);
2354 	u32 phys_id = LE32_TO_CPU(elem->phys_id);
2355 	u32 number = LE32_TO_CPU(elem->number);
2356 	u16 cap = LE16_TO_CPU(elem->cap);
2357 	bool found = true;
2358 
2359 	switch (cap) {
2360 	case ICE_AQC_CAPS_SWITCHING_MODE:
2361 		caps->switching_mode = number;
2362 		ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
2363 			  caps->switching_mode);
2364 		break;
2365 	case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2366 		caps->mgmt_mode = number;
2367 		caps->mgmt_protocols_mctp = logical_id;
2368 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
2369 			  caps->mgmt_mode);
2370 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
2371 			  caps->mgmt_protocols_mctp);
2372 		break;
2373 	case ICE_AQC_CAPS_OS2BMC:
2374 		caps->os2bmc = number;
2375 		ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
2376 		break;
2377 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
2378 		caps->valid_functions = number;
2379 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2380 			  caps->valid_functions);
2381 		break;
2382 	case ICE_AQC_CAPS_SRIOV:
2383 		caps->sr_iov_1_1 = (number == 1);
2384 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2385 			  caps->sr_iov_1_1);
2386 		break;
2387 	case ICE_AQC_CAPS_802_1QBG:
2388 		caps->evb_802_1_qbg = (number == 1);
2389 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
2390 		break;
2391 	case ICE_AQC_CAPS_802_1BR:
2392 		caps->evb_802_1_qbh = (number == 1);
2393 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
2394 		break;
2395 	case ICE_AQC_CAPS_DCB:
2396 		caps->dcb = (number == 1);
2397 		caps->active_tc_bitmap = logical_id;
2398 		caps->maxtc = phys_id;
2399 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2400 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2401 			  caps->active_tc_bitmap);
2402 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2403 		break;
2404 	case ICE_AQC_CAPS_ISCSI:
2405 		caps->iscsi = (number == 1);
2406 		ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
2407 		break;
2408 	case ICE_AQC_CAPS_RSS:
2409 		caps->rss_table_size = number;
2410 		caps->rss_table_entry_width = logical_id;
2411 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2412 			  caps->rss_table_size);
2413 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2414 			  caps->rss_table_entry_width);
2415 		break;
2416 	case ICE_AQC_CAPS_RXQS:
2417 		caps->num_rxq = number;
2418 		caps->rxq_first_id = phys_id;
2419 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2420 			  caps->num_rxq);
2421 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2422 			  caps->rxq_first_id);
2423 		break;
2424 	case ICE_AQC_CAPS_TXQS:
2425 		caps->num_txq = number;
2426 		caps->txq_first_id = phys_id;
2427 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2428 			  caps->num_txq);
2429 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2430 			  caps->txq_first_id);
2431 		break;
2432 	case ICE_AQC_CAPS_MSIX:
2433 		caps->num_msix_vectors = number;
2434 		caps->msix_vector_first_id = phys_id;
2435 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2436 			  caps->num_msix_vectors);
2437 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2438 			  caps->msix_vector_first_id);
2439 		break;
2440 	case ICE_AQC_CAPS_NVM_MGMT:
2441 		caps->sec_rev_disabled =
2442 			(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2443 			true : false;
2444 		ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2445 			  caps->sec_rev_disabled);
2446 		caps->update_disabled =
2447 			(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2448 			true : false;
2449 		ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2450 			  caps->update_disabled);
2451 		caps->nvm_unified_update =
2452 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2453 			true : false;
2454 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2455 			  caps->nvm_unified_update);
2456 		break;
2457 	case ICE_AQC_CAPS_CEM:
2458 		caps->mgmt_cem = (number == 1);
2459 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2460 			  caps->mgmt_cem);
2461 		break;
2462 	case ICE_AQC_CAPS_IWARP:
2463 		caps->iwarp = (number == 1);
2464 		ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
2465 		break;
2466 	case ICE_AQC_CAPS_ROCEV2_LAG:
2467 		caps->roce_lag = (number == 1);
2468 		ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
2469 			  prefix, caps->roce_lag);
2470 		break;
2471 	case ICE_AQC_CAPS_LED:
2472 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2473 			caps->led[phys_id] = true;
2474 			caps->led_pin_num++;
2475 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2476 		}
2477 		break;
2478 	case ICE_AQC_CAPS_SDP:
2479 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2480 			caps->sdp[phys_id] = true;
2481 			caps->sdp_pin_num++;
2482 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2483 		}
2484 		break;
2485 	case ICE_AQC_CAPS_WR_CSR_PROT:
2486 		caps->wr_csr_prot = number;
2487 		caps->wr_csr_prot |= (u64)logical_id << 32;
2488 		ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2489 			  (unsigned long long)caps->wr_csr_prot);
2490 		break;
2491 	case ICE_AQC_CAPS_WOL_PROXY:
2492 		caps->num_wol_proxy_fltr = number;
2493 		caps->wol_proxy_vsi_seid = logical_id;
2494 		caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2495 		caps->acpi_prog_mthd = !!(phys_id &
2496 					  ICE_ACPI_PROG_MTHD_M);
2497 		caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2498 		ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2499 			  caps->num_wol_proxy_fltr);
2500 		ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2501 			  caps->wol_proxy_vsi_seid);
2502 		ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
2503 			  prefix, caps->apm_wol_support);
2504 		break;
2505 	case ICE_AQC_CAPS_MAX_MTU:
2506 		caps->max_mtu = number;
2507 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2508 			  prefix, caps->max_mtu);
2509 		break;
2510 	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2511 		caps->pcie_reset_avoidance = (number > 0);
2512 		ice_debug(hw, ICE_DBG_INIT,
2513 			  "%s: pcie_reset_avoidance = %d\n", prefix,
2514 			  caps->pcie_reset_avoidance);
2515 		break;
2516 	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2517 		caps->reset_restrict_support = (number == 1);
2518 		ice_debug(hw, ICE_DBG_INIT,
2519 			  "%s: reset_restrict_support = %d\n", prefix,
2520 			  caps->reset_restrict_support);
2521 		break;
2522 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2523 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2524 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2525 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2526 	{
2527 		u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
2528 
2529 		caps->ext_topo_dev_img_ver_high[index] = number;
2530 		caps->ext_topo_dev_img_ver_low[index] = logical_id;
2531 		caps->ext_topo_dev_img_part_num[index] =
2532 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2533 			ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2534 		caps->ext_topo_dev_img_load_en[index] =
2535 			(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2536 		caps->ext_topo_dev_img_prog_en[index] =
2537 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2538 		ice_debug(hw, ICE_DBG_INIT,
2539 			  "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2540 			  prefix, index,
2541 			  caps->ext_topo_dev_img_ver_high[index]);
2542 		ice_debug(hw, ICE_DBG_INIT,
2543 			  "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2544 			  prefix, index,
2545 			  caps->ext_topo_dev_img_ver_low[index]);
2546 		ice_debug(hw, ICE_DBG_INIT,
2547 			  "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2548 			  prefix, index,
2549 			  caps->ext_topo_dev_img_part_num[index]);
2550 		ice_debug(hw, ICE_DBG_INIT,
2551 			  "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2552 			  prefix, index,
2553 			  caps->ext_topo_dev_img_load_en[index]);
2554 		ice_debug(hw, ICE_DBG_INIT,
2555 			  "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2556 			  prefix, index,
2557 			  caps->ext_topo_dev_img_prog_en[index]);
2558 		break;
2559 	}
2560 	case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2561 		caps->tx_sched_topo_comp_mode_en = (number == 1);
2562 		break;
2563 	case ICE_AQC_CAPS_DYN_FLATTENING:
2564 		caps->dyn_flattening_en = (number == 1);
2565 		ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
2566 			  prefix, caps->dyn_flattening_en);
2567 		break;
2568 	default:
2569 		/* Not one of the recognized common capabilities */
2570 		found = false;
2571 	}
2572 
2573 	return found;
2574 }
2575 
2576 /**
2577  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2578  * @hw: pointer to the HW structure
2579  * @caps: pointer to capabilities structure to fix
2580  *
2581  * Re-calculate the capabilities that are dependent on the number of physical
2582  * ports; i.e. some features are not supported or function differently on
2583  * devices with more than 4 ports.
2584  */
2585 static void
2586 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2587 {
2588 	/* This assumes device capabilities are always scanned before function
2589 	 * capabilities during the initialization flow.
2590 	 */
2591 	if (hw->dev_caps.num_funcs > 4) {
2592 		/* Max 4 TCs per port */
2593 		caps->maxtc = 4;
2594 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2595 			  caps->maxtc);
2596 		if (caps->iwarp) {
2597 			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2598 			caps->iwarp = 0;
2599 		}
2600 
2601 		/* print message only when processing device capabilities
2602 		 * during initialization.
2603 		 */
2604 		if (caps == &hw->dev_caps.common_cap)
2605 			ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2606 	}
2607 }
2608 
2609 /**
2610  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2611  * @hw: pointer to the HW struct
2612  * @func_p: pointer to function capabilities structure
2613  * @cap: pointer to the capability element to parse
2614  *
2615  * Extract function capabilities for ICE_AQC_CAPS_VF.
2616  */
2617 static void
2618 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2619 		       struct ice_aqc_list_caps_elem *cap)
2620 {
2621 	u32 number = LE32_TO_CPU(cap->number);
2622 	u32 logical_id = LE32_TO_CPU(cap->logical_id);
2623 
2624 	func_p->num_allocd_vfs = number;
2625 	func_p->vf_base_id = logical_id;
2626 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2627 		  func_p->num_allocd_vfs);
2628 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2629 		  func_p->vf_base_id);
2630 }
2631 
2632 /**
2633  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2634  * @hw: pointer to the HW struct
2635  * @func_p: pointer to function capabilities structure
2636  * @cap: pointer to the capability element to parse
2637  *
2638  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2639  */
2640 static void
2641 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2642 			struct ice_aqc_list_caps_elem *cap)
2643 {
2644 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2645 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2646 		  LE32_TO_CPU(cap->number));
2647 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2648 		  func_p->guar_num_vsi);
2649 }
2650 
2651 /**
2652  * ice_parse_func_caps - Parse function capabilities
2653  * @hw: pointer to the HW struct
2654  * @func_p: pointer to function capabilities structure
2655  * @buf: buffer containing the function capability records
2656  * @cap_count: the number of capabilities
2657  *
2658  * Helper function to parse function (0x000A) capabilities list. For
2659  * capabilities shared between device and function, this relies on
2660  * ice_parse_common_caps.
2661  *
2662  * Loop through the list of provided capabilities and extract the relevant
2663  * data into the function capabilities structured.
2664  */
2665 static void
2666 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2667 		    void *buf, u32 cap_count)
2668 {
2669 	struct ice_aqc_list_caps_elem *cap_resp;
2670 	u32 i;
2671 
2672 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2673 
2674 	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2675 
2676 	for (i = 0; i < cap_count; i++) {
2677 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2678 		bool found;
2679 
2680 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2681 					      &cap_resp[i], "func caps");
2682 
2683 		switch (cap) {
2684 		case ICE_AQC_CAPS_VF:
2685 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2686 			break;
2687 		case ICE_AQC_CAPS_VSI:
2688 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2689 			break;
2690 		default:
2691 			/* Don't list common capabilities as unknown */
2692 			if (!found)
2693 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2694 					  i, cap);
2695 			break;
2696 		}
2697 	}
2698 
2699 	ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2700 	ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2701 
2702 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2703 }
2704 
2705 /**
2706  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2707  * @hw: pointer to the HW struct
2708  * @dev_p: pointer to device capabilities structure
2709  * @cap: capability element to parse
2710  *
2711  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2712  */
2713 static void
2714 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2715 			      struct ice_aqc_list_caps_elem *cap)
2716 {
2717 	u32 number = LE32_TO_CPU(cap->number);
2718 
2719 	dev_p->num_funcs = ice_hweight32(number);
2720 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2721 		  dev_p->num_funcs);
2722 
2723 }
2724 
2725 /**
2726  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2727  * @hw: pointer to the HW struct
2728  * @dev_p: pointer to device capabilities structure
2729  * @cap: capability element to parse
2730  *
2731  * Parse ICE_AQC_CAPS_VF for device capabilities.
2732  */
2733 static void
2734 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2735 		      struct ice_aqc_list_caps_elem *cap)
2736 {
2737 	u32 number = LE32_TO_CPU(cap->number);
2738 
2739 	dev_p->num_vfs_exposed = number;
2740 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2741 		  dev_p->num_vfs_exposed);
2742 }
2743 
2744 /**
2745  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2746  * @hw: pointer to the HW struct
2747  * @dev_p: pointer to device capabilities structure
2748  * @cap: capability element to parse
2749  *
2750  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2751  */
2752 static void
2753 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2754 		       struct ice_aqc_list_caps_elem *cap)
2755 {
2756 	u32 number = LE32_TO_CPU(cap->number);
2757 
2758 	dev_p->num_vsi_allocd_to_host = number;
2759 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2760 		  dev_p->num_vsi_allocd_to_host);
2761 }
2762 
2763 /**
2764  * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2765  * @hw: pointer to the HW struct
2766  * @dev_p: pointer to device capabilities structure
2767  * @cap: capability element to parse
2768  *
2769  * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2770  */
2771 static void
2772 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2773 			    struct ice_aqc_list_caps_elem *cap)
2774 {
2775 	dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
2776 	dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2777 
2778 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2779 		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2780 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2781 		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2782 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
2783 		  dev_p->nac_topo.id);
2784 }
2785 
2786 /**
2787  * ice_parse_dev_caps - Parse device capabilities
2788  * @hw: pointer to the HW struct
2789  * @dev_p: pointer to device capabilities structure
2790  * @buf: buffer containing the device capability records
2791  * @cap_count: the number of capabilities
2792  *
2793  * Helper device to parse device (0x000B) capabilities list. For
2794  * capabilities shared between device and function, this relies on
2795  * ice_parse_common_caps.
2796  *
2797  * Loop through the list of provided capabilities and extract the relevant
2798  * data into the device capabilities structured.
2799  */
2800 static void
2801 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2802 		   void *buf, u32 cap_count)
2803 {
2804 	struct ice_aqc_list_caps_elem *cap_resp;
2805 	u32 i;
2806 
2807 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2808 
2809 	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2810 
2811 	for (i = 0; i < cap_count; i++) {
2812 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2813 		bool found;
2814 
2815 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2816 					      &cap_resp[i], "dev caps");
2817 
2818 		switch (cap) {
2819 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2820 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2821 			break;
2822 		case ICE_AQC_CAPS_VF:
2823 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2824 			break;
2825 		case ICE_AQC_CAPS_VSI:
2826 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2827 			break;
2828 		case ICE_AQC_CAPS_NAC_TOPOLOGY:
2829 			ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
2830 			break;
2831 		default:
2832 			/* Don't list common capabilities as unknown */
2833 			if (!found)
2834 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2835 					  i, cap);
2836 			break;
2837 		}
2838 	}
2839 
2840 	ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2841 	ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2842 
2843 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2844 }
2845 
2846 /**
2847  * ice_aq_list_caps - query function/device capabilities
2848  * @hw: pointer to the HW struct
2849  * @buf: a buffer to hold the capabilities
2850  * @buf_size: size of the buffer
2851  * @cap_count: if not NULL, set to the number of capabilities reported
2852  * @opc: capabilities type to discover, device or function
2853  * @cd: pointer to command details structure or NULL
2854  *
2855  * Get the function (0x000A) or device (0x000B) capabilities description from
2856  * firmware and store it in the buffer.
2857  *
2858  * If the cap_count pointer is not NULL, then it is set to the number of
2859  * capabilities firmware will report. Note that if the buffer size is too
2860  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2861  * cap_count will still be updated in this case. It is recommended that the
2862  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2863  * firmware could return) to avoid this.
2864  */
2865 static enum ice_status
2866 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2867 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2868 {
2869 	struct ice_aqc_list_caps *cmd;
2870 	struct ice_aq_desc desc;
2871 	enum ice_status status;
2872 
2873 	cmd = &desc.params.get_cap;
2874 
2875 	if (opc != ice_aqc_opc_list_func_caps &&
2876 	    opc != ice_aqc_opc_list_dev_caps)
2877 		return ICE_ERR_PARAM;
2878 
2879 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2880 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2881 
2882 	if (cap_count)
2883 		*cap_count = LE32_TO_CPU(cmd->count);
2884 
2885 	return status;
2886 }
2887 
2888 /**
2889  * ice_discover_dev_caps - Read and extract device capabilities
2890  * @hw: pointer to the hardware structure
2891  * @dev_caps: pointer to device capabilities structure
2892  *
2893  * Read the device capabilities and extract them into the dev_caps structure
2894  * for later use.
2895  */
2896 static enum ice_status
2897 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2898 {
2899 	enum ice_status status;
2900 	u32 cap_count = 0;
2901 	void *cbuf;
2902 
2903 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2904 	if (!cbuf)
2905 		return ICE_ERR_NO_MEMORY;
2906 
2907 	/* Although the driver doesn't know the number of capabilities the
2908 	 * device will return, we can simply send a 4KB buffer, the maximum
2909 	 * possible size that firmware can return.
2910 	 */
2911 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2912 
2913 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2914 				  ice_aqc_opc_list_dev_caps, NULL);
2915 	if (!status)
2916 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2917 	ice_free(hw, cbuf);
2918 
2919 	return status;
2920 }
2921 
2922 /**
2923  * ice_discover_func_caps - Read and extract function capabilities
2924  * @hw: pointer to the hardware structure
2925  * @func_caps: pointer to function capabilities structure
2926  *
2927  * Read the function capabilities and extract them into the func_caps structure
2928  * for later use.
2929  */
2930 static enum ice_status
2931 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2932 {
2933 	enum ice_status status;
2934 	u32 cap_count = 0;
2935 	void *cbuf;
2936 
2937 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2938 	if (!cbuf)
2939 		return ICE_ERR_NO_MEMORY;
2940 
2941 	/* Although the driver doesn't know the number of capabilities the
2942 	 * device will return, we can simply send a 4KB buffer, the maximum
2943 	 * possible size that firmware can return.
2944 	 */
2945 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2946 
2947 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2948 				  ice_aqc_opc_list_func_caps, NULL);
2949 	if (!status)
2950 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2951 	ice_free(hw, cbuf);
2952 
2953 	return status;
2954 }
2955 
2956 /**
2957  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2958  * @hw: pointer to the hardware structure
2959  */
2960 void ice_set_safe_mode_caps(struct ice_hw *hw)
2961 {
2962 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2963 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2964 	struct ice_hw_common_caps cached_caps;
2965 	u32 num_funcs;
2966 
2967 	/* cache some func_caps values that should be restored after memset */
2968 	cached_caps = func_caps->common_cap;
2969 
2970 	/* unset func capabilities */
2971 	memset(func_caps, 0, sizeof(*func_caps));
2972 
2973 #define ICE_RESTORE_FUNC_CAP(name) \
2974 	func_caps->common_cap.name = cached_caps.name
2975 
2976 	/* restore cached values */
2977 	ICE_RESTORE_FUNC_CAP(valid_functions);
2978 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2979 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2980 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2981 	ICE_RESTORE_FUNC_CAP(max_mtu);
2982 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2983 
2984 	/* one Tx and one Rx queue in safe mode */
2985 	func_caps->common_cap.num_rxq = 1;
2986 	func_caps->common_cap.num_txq = 1;
2987 
2988 	/* two MSIX vectors, one for traffic and one for misc causes */
2989 	func_caps->common_cap.num_msix_vectors = 2;
2990 	func_caps->guar_num_vsi = 1;
2991 
2992 	/* cache some dev_caps values that should be restored after memset */
2993 	cached_caps = dev_caps->common_cap;
2994 	num_funcs = dev_caps->num_funcs;
2995 
2996 	/* unset dev capabilities */
2997 	memset(dev_caps, 0, sizeof(*dev_caps));
2998 
2999 #define ICE_RESTORE_DEV_CAP(name) \
3000 	dev_caps->common_cap.name = cached_caps.name
3001 
3002 	/* restore cached values */
3003 	ICE_RESTORE_DEV_CAP(valid_functions);
3004 	ICE_RESTORE_DEV_CAP(txq_first_id);
3005 	ICE_RESTORE_DEV_CAP(rxq_first_id);
3006 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
3007 	ICE_RESTORE_DEV_CAP(max_mtu);
3008 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
3009 	dev_caps->num_funcs = num_funcs;
3010 
3011 	/* one Tx and one Rx queue per function in safe mode */
3012 	dev_caps->common_cap.num_rxq = num_funcs;
3013 	dev_caps->common_cap.num_txq = num_funcs;
3014 
3015 	/* two MSIX vectors per function */
3016 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
3017 }
3018 
3019 /**
3020  * ice_get_caps - get info about the HW
3021  * @hw: pointer to the hardware structure
3022  */
3023 enum ice_status ice_get_caps(struct ice_hw *hw)
3024 {
3025 	enum ice_status status;
3026 
3027 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
3028 	if (status)
3029 		return status;
3030 
3031 	return ice_discover_func_caps(hw, &hw->func_caps);
3032 }
3033 
3034 /**
3035  * ice_aq_manage_mac_write - manage MAC address write command
3036  * @hw: pointer to the HW struct
3037  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
3038  * @flags: flags to control write behavior
3039  * @cd: pointer to command details structure or NULL
3040  *
3041  * This function is used to write MAC address to the NVM (0x0108).
3042  */
3043 enum ice_status
3044 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
3045 			struct ice_sq_cd *cd)
3046 {
3047 	struct ice_aqc_manage_mac_write *cmd;
3048 	struct ice_aq_desc desc;
3049 
3050 	cmd = &desc.params.mac_write;
3051 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
3052 
3053 	cmd->flags = flags;
3054 	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
3055 
3056 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3057 }
3058 
3059 /**
3060  * ice_aq_clear_pxe_mode
3061  * @hw: pointer to the HW struct
3062  *
3063  * Tell the firmware that the driver is taking over from PXE (0x0110).
3064  */
3065 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
3066 {
3067 	struct ice_aq_desc desc;
3068 
3069 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3070 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3071 
3072 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3073 }
3074 
3075 /**
3076  * ice_clear_pxe_mode - clear pxe operations mode
3077  * @hw: pointer to the HW struct
3078  *
3079  * Make sure all PXE mode settings are cleared, including things
3080  * like descriptor fetch/write-back mode.
3081  */
3082 void ice_clear_pxe_mode(struct ice_hw *hw)
3083 {
3084 	if (ice_check_sq_alive(hw, &hw->adminq))
3085 		ice_aq_clear_pxe_mode(hw);
3086 }
3087 
3088 /**
3089  * ice_aq_set_port_params - set physical port parameters.
3090  * @pi: pointer to the port info struct
3091  * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
3092  * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
3093  * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
3094  * @double_vlan: if set double VLAN is enabled
3095  * @cd: pointer to command details structure or NULL
3096  *
3097  * Set Physical port parameters (0x0203)
3098  */
3099 enum ice_status
3100 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
3101 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
3102 		       struct ice_sq_cd *cd)
3103 {
3104 	struct ice_aqc_set_port_params *cmd;
3105 	struct ice_hw *hw = pi->hw;
3106 	struct ice_aq_desc desc;
3107 	u16 cmd_flags = 0;
3108 
3109 	cmd = &desc.params.set_port_params;
3110 
3111 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3112 	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3113 	if (save_bad_pac)
3114 		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3115 	if (pad_short_pac)
3116 		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3117 	if (double_vlan)
3118 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3119 	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3120 
3121 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3122 }
3123 
3124 /**
3125  * ice_is_100m_speed_supported
3126  * @hw: pointer to the HW struct
3127  *
3128  * returns true if 100M speeds are supported by the device,
3129  * false otherwise.
3130  */
3131 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3132 {
3133 	switch (hw->device_id) {
3134 	case ICE_DEV_ID_E822C_SGMII:
3135 	case ICE_DEV_ID_E822L_SGMII:
3136 	case ICE_DEV_ID_E823L_1GBE:
3137 	case ICE_DEV_ID_E823C_SGMII:
3138 		return true;
3139 	default:
3140 		return false;
3141 	}
3142 }
3143 
3144 /**
3145  * ice_get_link_speed_based_on_phy_type - returns link speed
3146  * @phy_type_low: lower part of phy_type
3147  * @phy_type_high: higher part of phy_type
3148  *
3149  * This helper function will convert an entry in PHY type structure
3150  * [phy_type_low, phy_type_high] to its corresponding link speed.
3151  * Note: In the structure of [phy_type_low, phy_type_high], there should
3152  * be one bit set, as this function will convert one PHY type to its
3153  * speed.
3154  * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
3155  * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
3156  */
3157 static u16
3158 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3159 {
3160 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3161 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3162 
3163 	switch (phy_type_low) {
3164 	case ICE_PHY_TYPE_LOW_100BASE_TX:
3165 	case ICE_PHY_TYPE_LOW_100M_SGMII:
3166 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3167 		break;
3168 	case ICE_PHY_TYPE_LOW_1000BASE_T:
3169 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
3170 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
3171 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
3172 	case ICE_PHY_TYPE_LOW_1G_SGMII:
3173 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3174 		break;
3175 	case ICE_PHY_TYPE_LOW_2500BASE_T:
3176 	case ICE_PHY_TYPE_LOW_2500BASE_X:
3177 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
3178 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3179 		break;
3180 	case ICE_PHY_TYPE_LOW_5GBASE_T:
3181 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
3182 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3183 		break;
3184 	case ICE_PHY_TYPE_LOW_10GBASE_T:
3185 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3186 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
3187 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
3188 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3189 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3190 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3191 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3192 		break;
3193 	case ICE_PHY_TYPE_LOW_25GBASE_T:
3194 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
3195 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3196 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3197 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
3198 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
3199 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
3200 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3201 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3202 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3203 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3204 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3205 		break;
3206 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3207 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3208 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3209 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3210 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3211 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
3212 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3213 		break;
3214 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3215 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3216 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3217 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3218 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3219 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
3220 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3221 	case ICE_PHY_TYPE_LOW_50G_AUI2:
3222 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
3223 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
3224 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
3225 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
3226 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3227 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3228 	case ICE_PHY_TYPE_LOW_50G_AUI1:
3229 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3230 		break;
3231 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3232 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3233 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3234 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3235 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3236 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
3237 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3238 	case ICE_PHY_TYPE_LOW_100G_AUI4:
3239 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3240 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3241 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3242 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3243 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
3244 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3245 		break;
3246 	default:
3247 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3248 		break;
3249 	}
3250 
3251 	switch (phy_type_high) {
3252 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3253 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3254 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3255 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3256 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3257 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3258 		break;
3259 	default:
3260 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3261 		break;
3262 	}
3263 
3264 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3265 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3266 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3267 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3268 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3269 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3270 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3271 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3272 		return speed_phy_type_low;
3273 	else
3274 		return speed_phy_type_high;
3275 }
3276 
3277 /**
3278  * ice_update_phy_type
3279  * @phy_type_low: pointer to the lower part of phy_type
3280  * @phy_type_high: pointer to the higher part of phy_type
3281  * @link_speeds_bitmap: targeted link speeds bitmap
3282  *
3283  * Note: For the link_speeds_bitmap structure, you can check it at
3284  * [ice_aqc_get_link_status->link_speed]. Caller can pass in
3285  * link_speeds_bitmap include multiple speeds.
3286  *
3287  * Each entry in this [phy_type_low, phy_type_high] structure will
3288  * present a certain link speed. This helper function will turn on bits
3289  * in [phy_type_low, phy_type_high] structure based on the value of
3290  * link_speeds_bitmap input parameter.
3291  */
3292 void
3293 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3294 		    u16 link_speeds_bitmap)
3295 {
3296 	u64 pt_high;
3297 	u64 pt_low;
3298 	int index;
3299 	u16 speed;
3300 
3301 	/* We first check with low part of phy_type */
3302 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3303 		pt_low = BIT_ULL(index);
3304 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3305 
3306 		if (link_speeds_bitmap & speed)
3307 			*phy_type_low |= BIT_ULL(index);
3308 	}
3309 
3310 	/* We then check with high part of phy_type */
3311 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3312 		pt_high = BIT_ULL(index);
3313 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3314 
3315 		if (link_speeds_bitmap & speed)
3316 			*phy_type_high |= BIT_ULL(index);
3317 	}
3318 }
3319 
3320 /**
3321  * ice_aq_set_phy_cfg
3322  * @hw: pointer to the HW struct
3323  * @pi: port info structure of the interested logical port
3324  * @cfg: structure with PHY configuration data to be set
3325  * @cd: pointer to command details structure or NULL
3326  *
3327  * Set the various PHY configuration parameters supported on the Port.
3328  * One or more of the Set PHY config parameters may be ignored in an MFP
3329  * mode as the PF may not have the privilege to set some of the PHY Config
3330  * parameters. This status will be indicated by the command response (0x0601).
3331  */
3332 enum ice_status
3333 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3334 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3335 {
3336 	struct ice_aq_desc desc;
3337 	enum ice_status status;
3338 
3339 	if (!cfg)
3340 		return ICE_ERR_PARAM;
3341 
3342 	/* Ensure that only valid bits of cfg->caps can be turned on. */
3343 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3344 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3345 			  cfg->caps);
3346 
3347 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3348 	}
3349 
3350 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3351 	desc.params.set_phy.lport_num = pi->lport;
3352 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3353 
3354 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3355 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
3356 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3357 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
3358 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3359 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
3360 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
3361 		  cfg->low_power_ctrl_an);
3362 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
3363 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
3364 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
3365 		  cfg->link_fec_opt);
3366 
3367 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3368 
3369 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3370 		status = ICE_SUCCESS;
3371 
3372 	if (!status)
3373 		pi->phy.curr_user_phy_cfg = *cfg;
3374 
3375 	return status;
3376 }
3377 
3378 /**
3379  * ice_update_link_info - update status of the HW network link
3380  * @pi: port info structure of the interested logical port
3381  */
3382 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3383 {
3384 	struct ice_link_status *li;
3385 	enum ice_status status;
3386 
3387 	if (!pi)
3388 		return ICE_ERR_PARAM;
3389 
3390 	li = &pi->phy.link_info;
3391 
3392 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
3393 	if (status)
3394 		return status;
3395 
3396 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3397 		struct ice_aqc_get_phy_caps_data *pcaps;
3398 		struct ice_hw *hw;
3399 
3400 		hw = pi->hw;
3401 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3402 			ice_malloc(hw, sizeof(*pcaps));
3403 		if (!pcaps)
3404 			return ICE_ERR_NO_MEMORY;
3405 
3406 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3407 					     pcaps, NULL);
3408 
3409 		if (status == ICE_SUCCESS)
3410 			ice_memcpy(li->module_type, &pcaps->module_type,
3411 				   sizeof(li->module_type),
3412 				   ICE_NONDMA_TO_NONDMA);
3413 
3414 		ice_free(hw, pcaps);
3415 	}
3416 
3417 	return status;
3418 }
3419 
3420 /**
3421  * ice_cache_phy_user_req
3422  * @pi: port information structure
3423  * @cache_data: PHY logging data
3424  * @cache_mode: PHY logging mode
3425  *
3426  * Log the user request on (FC, FEC, SPEED) for later user.
3427  */
3428 static void
3429 ice_cache_phy_user_req(struct ice_port_info *pi,
3430 		       struct ice_phy_cache_mode_data cache_data,
3431 		       enum ice_phy_cache_mode cache_mode)
3432 {
3433 	if (!pi)
3434 		return;
3435 
3436 	switch (cache_mode) {
3437 	case ICE_FC_MODE:
3438 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3439 		break;
3440 	case ICE_SPEED_MODE:
3441 		pi->phy.curr_user_speed_req =
3442 			cache_data.data.curr_user_speed_req;
3443 		break;
3444 	case ICE_FEC_MODE:
3445 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3446 		break;
3447 	default:
3448 		break;
3449 	}
3450 }
3451 
3452 /**
3453  * ice_caps_to_fc_mode
3454  * @caps: PHY capabilities
3455  *
3456  * Convert PHY FC capabilities to ice FC mode
3457  */
3458 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3459 {
3460 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3461 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3462 		return ICE_FC_FULL;
3463 
3464 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3465 		return ICE_FC_TX_PAUSE;
3466 
3467 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3468 		return ICE_FC_RX_PAUSE;
3469 
3470 	return ICE_FC_NONE;
3471 }
3472 
3473 /**
3474  * ice_caps_to_fec_mode
3475  * @caps: PHY capabilities
3476  * @fec_options: Link FEC options
3477  *
3478  * Convert PHY FEC capabilities to ice FEC mode
3479  */
3480 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3481 {
3482 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
3483 		if (fec_options & ICE_AQC_PHY_FEC_DIS)
3484 			return ICE_FEC_DIS_AUTO;
3485 		else
3486 			return ICE_FEC_AUTO;
3487 	}
3488 
3489 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3490 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3491 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3492 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3493 		return ICE_FEC_BASER;
3494 
3495 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3496 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3497 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3498 		return ICE_FEC_RS;
3499 
3500 	return ICE_FEC_NONE;
3501 }
3502 
3503 /**
3504  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3505  * @pi: port information structure
3506  * @cfg: PHY configuration data to set FC mode
3507  * @req_mode: FC mode to configure
3508  */
3509 static enum ice_status
3510 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3511 	       enum ice_fc_mode req_mode)
3512 {
3513 	struct ice_phy_cache_mode_data cache_data;
3514 	u8 pause_mask = 0x0;
3515 
3516 	if (!pi || !cfg)
3517 		return ICE_ERR_BAD_PTR;
3518 	switch (req_mode) {
3519 	case ICE_FC_AUTO:
3520 	{
3521 		struct ice_aqc_get_phy_caps_data *pcaps;
3522 		enum ice_status status;
3523 
3524 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3525 			ice_malloc(pi->hw, sizeof(*pcaps));
3526 		if (!pcaps)
3527 			return ICE_ERR_NO_MEMORY;
3528 		/* Query the value of FC that both the NIC and attached media
3529 		 * can do.
3530 		 */
3531 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3532 					     pcaps, NULL);
3533 		if (status) {
3534 			ice_free(pi->hw, pcaps);
3535 			return status;
3536 		}
3537 
3538 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3539 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3540 
3541 		ice_free(pi->hw, pcaps);
3542 		break;
3543 	}
3544 	case ICE_FC_FULL:
3545 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3546 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3547 		break;
3548 	case ICE_FC_RX_PAUSE:
3549 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3550 		break;
3551 	case ICE_FC_TX_PAUSE:
3552 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3553 		break;
3554 	default:
3555 		break;
3556 	}
3557 
3558 	/* clear the old pause settings */
3559 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3560 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3561 
3562 	/* set the new capabilities */
3563 	cfg->caps |= pause_mask;
3564 
3565 	/* Cache user FC request */
3566 	cache_data.data.curr_user_fc_req = req_mode;
3567 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3568 
3569 	return ICE_SUCCESS;
3570 }
3571 
3572 /**
3573  * ice_set_fc
3574  * @pi: port information structure
3575  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3576  * @ena_auto_link_update: enable automatic link update
3577  *
3578  * Set the requested flow control mode.
3579  */
3580 enum ice_status
3581 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3582 {
3583 	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
3584 	struct ice_aqc_get_phy_caps_data *pcaps;
3585 	enum ice_status status;
3586 	struct ice_hw *hw;
3587 
3588 	if (!pi || !aq_failures)
3589 		return ICE_ERR_BAD_PTR;
3590 
3591 	*aq_failures = 0;
3592 	hw = pi->hw;
3593 
3594 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3595 		ice_malloc(hw, sizeof(*pcaps));
3596 	if (!pcaps)
3597 		return ICE_ERR_NO_MEMORY;
3598 
3599 	/* Get the current PHY config */
3600 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3601 				     pcaps, NULL);
3602 
3603 	if (status) {
3604 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3605 		goto out;
3606 	}
3607 
3608 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3609 
3610 	/* Configure the set PHY data */
3611 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3612 	if (status) {
3613 		if (status != ICE_ERR_BAD_PTR)
3614 			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3615 
3616 		goto out;
3617 	}
3618 
3619 	/* If the capabilities have changed, then set the new config */
3620 	if (cfg.caps != pcaps->caps) {
3621 		int retry_count, retry_max = 10;
3622 
3623 		/* Auto restart link so settings take effect */
3624 		if (ena_auto_link_update)
3625 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3626 
3627 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3628 		if (status) {
3629 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3630 			goto out;
3631 		}
3632 
3633 		/* Update the link info
3634 		 * It sometimes takes a really long time for link to
3635 		 * come back from the atomic reset. Thus, we wait a
3636 		 * little bit.
3637 		 */
3638 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3639 			status = ice_update_link_info(pi);
3640 
3641 			if (status == ICE_SUCCESS)
3642 				break;
3643 
3644 			ice_msec_delay(100, true);
3645 		}
3646 
3647 		if (status)
3648 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3649 	}
3650 
3651 out:
3652 	ice_free(hw, pcaps);
3653 	return status;
3654 }
3655 
3656 /**
3657  * ice_phy_caps_equals_cfg
3658  * @phy_caps: PHY capabilities
3659  * @phy_cfg: PHY configuration
3660  *
3661  * Helper function to determine if PHY capabilities matches PHY
3662  * configuration
3663  */
3664 bool
3665 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3666 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3667 {
3668 	u8 caps_mask, cfg_mask;
3669 
3670 	if (!phy_caps || !phy_cfg)
3671 		return false;
3672 
3673 	/* These bits are not common between capabilities and configuration.
3674 	 * Do not use them to determine equality.
3675 	 */
3676 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3677 					      ICE_AQC_PHY_EN_MOD_QUAL);
3678 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3679 
3680 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3681 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3682 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3683 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3684 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3685 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3686 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3687 		return false;
3688 
3689 	return true;
3690 }
3691 
3692 /**
3693  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3694  * @pi: port information structure
3695  * @caps: PHY ability structure to copy data from
3696  * @cfg: PHY configuration structure to copy data to
3697  *
3698  * Helper function to copy AQC PHY get ability data to PHY set configuration
3699  * data structure
3700  */
3701 void
3702 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3703 			 struct ice_aqc_get_phy_caps_data *caps,
3704 			 struct ice_aqc_set_phy_cfg_data *cfg)
3705 {
3706 	if (!pi || !caps || !cfg)
3707 		return;
3708 
3709 	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3710 	cfg->phy_type_low = caps->phy_type_low;
3711 	cfg->phy_type_high = caps->phy_type_high;
3712 	cfg->caps = caps->caps;
3713 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3714 	cfg->eee_cap = caps->eee_cap;
3715 	cfg->eeer_value = caps->eeer_value;
3716 	cfg->link_fec_opt = caps->link_fec_options;
3717 	cfg->module_compliance_enforcement =
3718 		caps->module_compliance_enforcement;
3719 }
3720 
3721 /**
3722  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3723  * @pi: port information structure
3724  * @cfg: PHY configuration data to set FEC mode
3725  * @fec: FEC mode to configure
3726  */
3727 enum ice_status
3728 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3729 		enum ice_fec_mode fec)
3730 {
3731 	struct ice_aqc_get_phy_caps_data *pcaps;
3732 	enum ice_status status = ICE_SUCCESS;
3733 	struct ice_hw *hw;
3734 
3735 	if (!pi || !cfg)
3736 		return ICE_ERR_BAD_PTR;
3737 
3738 	hw = pi->hw;
3739 
3740 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3741 		ice_malloc(hw, sizeof(*pcaps));
3742 	if (!pcaps)
3743 		return ICE_ERR_NO_MEMORY;
3744 
3745 	status = ice_aq_get_phy_caps(pi, false,
3746 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3747 				      ICE_AQC_REPORT_DFLT_CFG :
3748 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3749 
3750 	if (status)
3751 		goto out;
3752 
3753 	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3754 	cfg->link_fec_opt = pcaps->link_fec_options;
3755 
3756 	switch (fec) {
3757 	case ICE_FEC_BASER:
3758 		/* Clear RS bits, and AND BASE-R ability
3759 		 * bits and OR request bits.
3760 		 */
3761 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3762 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3763 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3764 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3765 		break;
3766 	case ICE_FEC_RS:
3767 		/* Clear BASE-R bits, and AND RS ability
3768 		 * bits and OR request bits.
3769 		 */
3770 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3771 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3772 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3773 		break;
3774 	case ICE_FEC_NONE:
3775 		/* Clear all FEC option bits. */
3776 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3777 		break;
3778 	case ICE_FEC_DIS_AUTO:
3779 		/* Set No FEC and auto FEC */
3780 		if (!ice_fw_supports_fec_dis_auto(hw))
3781 			return ICE_ERR_NOT_SUPPORTED;
3782 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
3783 		/* fall-through */
3784 	case ICE_FEC_AUTO:
3785 		/* AND auto FEC bit, and all caps bits. */
3786 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3787 		cfg->link_fec_opt |= pcaps->link_fec_options;
3788 		break;
3789 	default:
3790 		status = ICE_ERR_PARAM;
3791 		break;
3792 	}
3793 
3794 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3795 	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3796 		struct ice_link_default_override_tlv tlv;
3797 
3798 		if (ice_get_link_default_override(&tlv, pi))
3799 			goto out;
3800 
3801 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3802 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3803 			cfg->link_fec_opt = tlv.fec_options;
3804 	}
3805 
3806 out:
3807 	ice_free(hw, pcaps);
3808 
3809 	return status;
3810 }
3811 
3812 /**
3813  * ice_get_link_status - get status of the HW network link
3814  * @pi: port information structure
3815  * @link_up: pointer to bool (true/false = linkup/linkdown)
3816  *
3817  * Variable link_up is true if link is up, false if link is down.
3818  * The variable link_up is invalid if status is non zero. As a
3819  * result of this call, link status reporting becomes enabled
3820  */
3821 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3822 {
3823 	struct ice_phy_info *phy_info;
3824 	enum ice_status status = ICE_SUCCESS;
3825 
3826 	if (!pi || !link_up)
3827 		return ICE_ERR_PARAM;
3828 
3829 	phy_info = &pi->phy;
3830 
3831 	if (phy_info->get_link_info) {
3832 		status = ice_update_link_info(pi);
3833 
3834 		if (status)
3835 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3836 				  status);
3837 	}
3838 
3839 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3840 
3841 	return status;
3842 }
3843 
3844 /**
3845  * ice_aq_set_link_restart_an
3846  * @pi: pointer to the port information structure
3847  * @ena_link: if true: enable link, if false: disable link
3848  * @cd: pointer to command details structure or NULL
3849  *
3850  * Sets up the link and restarts the Auto-Negotiation over the link.
3851  */
3852 enum ice_status
3853 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3854 			   struct ice_sq_cd *cd)
3855 {
3856 	struct ice_aqc_restart_an *cmd;
3857 	struct ice_aq_desc desc;
3858 
3859 	cmd = &desc.params.restart_an;
3860 
3861 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3862 
3863 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3864 	cmd->lport_num = pi->lport;
3865 	if (ena_link)
3866 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3867 	else
3868 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3869 
3870 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3871 }
3872 
3873 /**
3874  * ice_aq_set_event_mask
3875  * @hw: pointer to the HW struct
3876  * @port_num: port number of the physical function
3877  * @mask: event mask to be set
3878  * @cd: pointer to command details structure or NULL
3879  *
3880  * Set event mask (0x0613)
3881  */
3882 enum ice_status
3883 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3884 		      struct ice_sq_cd *cd)
3885 {
3886 	struct ice_aqc_set_event_mask *cmd;
3887 	struct ice_aq_desc desc;
3888 
3889 	cmd = &desc.params.set_event_mask;
3890 
3891 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3892 
3893 	cmd->lport_num = port_num;
3894 
3895 	cmd->event_mask = CPU_TO_LE16(mask);
3896 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3897 }
3898 
3899 /**
3900  * ice_aq_set_mac_loopback
3901  * @hw: pointer to the HW struct
3902  * @ena_lpbk: Enable or Disable loopback
3903  * @cd: pointer to command details structure or NULL
3904  *
3905  * Enable/disable loopback on a given port
3906  */
3907 enum ice_status
3908 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3909 {
3910 	struct ice_aqc_set_mac_lb *cmd;
3911 	struct ice_aq_desc desc;
3912 
3913 	cmd = &desc.params.set_mac_lb;
3914 
3915 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3916 	if (ena_lpbk)
3917 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3918 
3919 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3920 }
3921 
3922 /**
3923  * ice_aq_set_port_id_led
3924  * @pi: pointer to the port information
3925  * @is_orig_mode: is this LED set to original mode (by the net-list)
3926  * @cd: pointer to command details structure or NULL
3927  *
3928  * Set LED value for the given port (0x06e9)
3929  */
3930 enum ice_status
3931 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3932 		       struct ice_sq_cd *cd)
3933 {
3934 	struct ice_aqc_set_port_id_led *cmd;
3935 	struct ice_hw *hw = pi->hw;
3936 	struct ice_aq_desc desc;
3937 
3938 	cmd = &desc.params.set_port_id_led;
3939 
3940 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3941 
3942 	if (is_orig_mode)
3943 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3944 	else
3945 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3946 
3947 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3948 }
3949 
3950 /**
3951  * ice_aq_sff_eeprom
3952  * @hw: pointer to the HW struct
3953  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3954  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3955  * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3956  * @page: QSFP page
3957  * @set_page: set or ignore the page
3958  * @data: pointer to data buffer to be read/written to the I2C device.
3959  * @length: 1-16 for read, 1 for write.
3960  * @write: 0 read, 1 for write.
3961  * @cd: pointer to command details structure or NULL
3962  *
3963  * Read/Write SFF EEPROM (0x06EE)
3964  */
3965 enum ice_status
3966 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3967 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3968 		  bool write, struct ice_sq_cd *cd)
3969 {
3970 	struct ice_aqc_sff_eeprom *cmd;
3971 	struct ice_aq_desc desc;
3972 	enum ice_status status;
3973 
3974 	if (!data || (mem_addr & 0xff00))
3975 		return ICE_ERR_PARAM;
3976 
3977 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3978 	cmd = &desc.params.read_write_sff_param;
3979 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3980 	cmd->lport_num = (u8)(lport & 0xff);
3981 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3982 	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3983 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3984 					((set_page <<
3985 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3986 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3987 	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3988 	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3989 	if (write)
3990 		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3991 
3992 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3993 	return status;
3994 }
3995 
3996 /**
3997  * ice_aq_prog_topo_dev_nvm
3998  * @hw: pointer to the hardware structure
3999  * @topo_params: pointer to structure storing topology parameters for a device
4000  * @cd: pointer to command details structure or NULL
4001  *
4002  * Program Topology Device NVM (0x06F2)
4003  *
4004  */
4005 enum ice_status
4006 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
4007 			 struct ice_aqc_link_topo_params *topo_params,
4008 			 struct ice_sq_cd *cd)
4009 {
4010 	struct ice_aqc_prog_topo_dev_nvm *cmd;
4011 	struct ice_aq_desc desc;
4012 
4013 	cmd = &desc.params.prog_topo_dev_nvm;
4014 
4015 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
4016 
4017 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4018 		   ICE_NONDMA_TO_NONDMA);
4019 
4020 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4021 }
4022 
4023 /**
4024  * ice_aq_read_topo_dev_nvm
4025  * @hw: pointer to the hardware structure
4026  * @topo_params: pointer to structure storing topology parameters for a device
4027  * @start_address: byte offset in the topology device NVM
4028  * @data: pointer to data buffer
4029  * @data_size: number of bytes to be read from the topology device NVM
4030  * @cd: pointer to command details structure or NULL
4031  * Read Topology Device NVM (0x06F3)
4032  *
4033  */
4034 enum ice_status
4035 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
4036 			 struct ice_aqc_link_topo_params *topo_params,
4037 			 u32 start_address, u8 *data, u8 data_size,
4038 			 struct ice_sq_cd *cd)
4039 {
4040 	struct ice_aqc_read_topo_dev_nvm *cmd;
4041 	struct ice_aq_desc desc;
4042 	enum ice_status status;
4043 
4044 	if (!data || data_size == 0 ||
4045 	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
4046 		return ICE_ERR_PARAM;
4047 
4048 	cmd = &desc.params.read_topo_dev_nvm;
4049 
4050 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
4051 
4052 	desc.datalen = CPU_TO_LE16(data_size);
4053 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4054 		   ICE_NONDMA_TO_NONDMA);
4055 	cmd->start_address = CPU_TO_LE32(start_address);
4056 
4057 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4058 	if (status)
4059 		return status;
4060 
4061 	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
4062 
4063 	return ICE_SUCCESS;
4064 }
4065 
4066 /**
4067  * __ice_aq_get_set_rss_lut
4068  * @hw: pointer to the hardware structure
4069  * @params: RSS LUT parameters
4070  * @set: set true to set the table, false to get the table
4071  *
4072  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4073  */
4074 static enum ice_status
4075 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
4076 {
4077 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
4078 	struct ice_aqc_get_set_rss_lut *cmd_resp;
4079 	struct ice_aq_desc desc;
4080 	enum ice_status status;
4081 	u8 *lut;
4082 
4083 	if (!params)
4084 		return ICE_ERR_PARAM;
4085 
4086 	vsi_handle = params->vsi_handle;
4087 	lut = params->lut;
4088 
4089 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
4090 		return ICE_ERR_PARAM;
4091 
4092 	lut_size = params->lut_size;
4093 	lut_type = params->lut_type;
4094 	glob_lut_idx = params->global_lut_id;
4095 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4096 
4097 	cmd_resp = &desc.params.get_set_rss_lut;
4098 
4099 	if (set) {
4100 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
4101 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4102 	} else {
4103 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
4104 	}
4105 
4106 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4107 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
4108 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
4109 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
4110 
4111 	switch (lut_type) {
4112 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
4113 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
4114 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
4115 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4116 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
4117 		break;
4118 	default:
4119 		status = ICE_ERR_PARAM;
4120 		goto ice_aq_get_set_rss_lut_exit;
4121 	}
4122 
4123 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
4124 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4125 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4126 
4127 		if (!set)
4128 			goto ice_aq_get_set_rss_lut_send;
4129 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4130 		if (!set)
4131 			goto ice_aq_get_set_rss_lut_send;
4132 	} else {
4133 		goto ice_aq_get_set_rss_lut_send;
4134 	}
4135 
4136 	/* LUT size is only valid for Global and PF table types */
4137 	switch (lut_size) {
4138 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
4139 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
4140 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4141 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4142 		break;
4143 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
4144 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
4145 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4146 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4147 		break;
4148 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
4149 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4150 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
4151 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4152 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4153 			break;
4154 		}
4155 		/* fall-through */
4156 	default:
4157 		status = ICE_ERR_PARAM;
4158 		goto ice_aq_get_set_rss_lut_exit;
4159 	}
4160 
4161 ice_aq_get_set_rss_lut_send:
4162 	cmd_resp->flags = CPU_TO_LE16(flags);
4163 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4164 
4165 ice_aq_get_set_rss_lut_exit:
4166 	return status;
4167 }
4168 
4169 /**
4170  * ice_aq_get_rss_lut
4171  * @hw: pointer to the hardware structure
4172  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4173  *
4174  * get the RSS lookup table, PF or VSI type
4175  */
4176 enum ice_status
4177 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4178 {
4179 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
4180 }
4181 
4182 /**
4183  * ice_aq_set_rss_lut
4184  * @hw: pointer to the hardware structure
4185  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4186  *
4187  * set the RSS lookup table, PF or VSI type
4188  */
4189 enum ice_status
4190 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4191 {
4192 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
4193 }
4194 
4195 /**
4196  * __ice_aq_get_set_rss_key
4197  * @hw: pointer to the HW struct
4198  * @vsi_id: VSI FW index
4199  * @key: pointer to key info struct
4200  * @set: set true to set the key, false to get the key
4201  *
4202  * get (0x0B04) or set (0x0B02) the RSS key per VSI
4203  */
4204 static enum
4205 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4206 				    struct ice_aqc_get_set_rss_keys *key,
4207 				    bool set)
4208 {
4209 	struct ice_aqc_get_set_rss_key *cmd_resp;
4210 	u16 key_size = sizeof(*key);
4211 	struct ice_aq_desc desc;
4212 
4213 	cmd_resp = &desc.params.get_set_rss_key;
4214 
4215 	if (set) {
4216 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4217 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4218 	} else {
4219 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4220 	}
4221 
4222 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4223 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4224 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4225 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4226 
4227 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4228 }
4229 
4230 /**
4231  * ice_aq_get_rss_key
4232  * @hw: pointer to the HW struct
4233  * @vsi_handle: software VSI handle
4234  * @key: pointer to key info struct
4235  *
4236  * get the RSS key per VSI
4237  */
4238 enum ice_status
4239 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4240 		   struct ice_aqc_get_set_rss_keys *key)
4241 {
4242 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4243 		return ICE_ERR_PARAM;
4244 
4245 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4246 					key, false);
4247 }
4248 
4249 /**
4250  * ice_aq_set_rss_key
4251  * @hw: pointer to the HW struct
4252  * @vsi_handle: software VSI handle
4253  * @keys: pointer to key info struct
4254  *
4255  * set the RSS key per VSI
4256  */
4257 enum ice_status
4258 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4259 		   struct ice_aqc_get_set_rss_keys *keys)
4260 {
4261 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4262 		return ICE_ERR_PARAM;
4263 
4264 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4265 					keys, true);
4266 }
4267 
4268 /**
4269  * ice_aq_add_lan_txq
4270  * @hw: pointer to the hardware structure
4271  * @num_qgrps: Number of added queue groups
4272  * @qg_list: list of queue groups to be added
4273  * @buf_size: size of buffer for indirect command
4274  * @cd: pointer to command details structure or NULL
4275  *
4276  * Add Tx LAN queue (0x0C30)
4277  *
4278  * NOTE:
4279  * Prior to calling add Tx LAN queue:
4280  * Initialize the following as part of the Tx queue context:
4281  * Completion queue ID if the queue uses Completion queue, Quanta profile,
4282  * Cache profile and Packet shaper profile.
4283  *
4284  * After add Tx LAN queue AQ command is completed:
4285  * Interrupts should be associated with specific queues,
4286  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4287  * flow.
4288  */
4289 enum ice_status
4290 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4291 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4292 		   struct ice_sq_cd *cd)
4293 {
4294 	struct ice_aqc_add_tx_qgrp *list;
4295 	struct ice_aqc_add_txqs *cmd;
4296 	struct ice_aq_desc desc;
4297 	u16 i, sum_size = 0;
4298 
4299 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4300 
4301 	cmd = &desc.params.add_txqs;
4302 
4303 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4304 
4305 	if (!qg_list)
4306 		return ICE_ERR_PARAM;
4307 
4308 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4309 		return ICE_ERR_PARAM;
4310 
4311 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
4312 		sum_size += ice_struct_size(list, txqs, list->num_txqs);
4313 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4314 						      list->num_txqs);
4315 	}
4316 
4317 	if (buf_size != sum_size)
4318 		return ICE_ERR_PARAM;
4319 
4320 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4321 
4322 	cmd->num_qgrps = num_qgrps;
4323 
4324 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4325 }
4326 
4327 /**
4328  * ice_aq_dis_lan_txq
4329  * @hw: pointer to the hardware structure
4330  * @num_qgrps: number of groups in the list
4331  * @qg_list: the list of groups to disable
4332  * @buf_size: the total size of the qg_list buffer in bytes
4333  * @rst_src: if called due to reset, specifies the reset source
4334  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4335  * @cd: pointer to command details structure or NULL
4336  *
4337  * Disable LAN Tx queue (0x0C31)
4338  */
4339 static enum ice_status
4340 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4341 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4342 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
4343 		   struct ice_sq_cd *cd)
4344 {
4345 	struct ice_aqc_dis_txq_item *item;
4346 	struct ice_aqc_dis_txqs *cmd;
4347 	struct ice_aq_desc desc;
4348 	enum ice_status status;
4349 	u16 i, sz = 0;
4350 
4351 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4352 	cmd = &desc.params.dis_txqs;
4353 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4354 
4355 	/* qg_list can be NULL only in VM/VF reset flow */
4356 	if (!qg_list && !rst_src)
4357 		return ICE_ERR_PARAM;
4358 
4359 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4360 		return ICE_ERR_PARAM;
4361 
4362 	cmd->num_entries = num_qgrps;
4363 
4364 	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4365 					    ICE_AQC_Q_DIS_TIMEOUT_M);
4366 
4367 	switch (rst_src) {
4368 	case ICE_VM_RESET:
4369 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4370 		cmd->vmvf_and_timeout |=
4371 			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4372 		break;
4373 	case ICE_VF_RESET:
4374 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4375 		/* In this case, FW expects vmvf_num to be absolute VF ID */
4376 		cmd->vmvf_and_timeout |=
4377 			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4378 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
4379 		break;
4380 	case ICE_NO_RESET:
4381 	default:
4382 		break;
4383 	}
4384 
4385 	/* flush pipe on time out */
4386 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4387 	/* If no queue group info, we are in a reset flow. Issue the AQ */
4388 	if (!qg_list)
4389 		goto do_aq;
4390 
4391 	/* set RD bit to indicate that command buffer is provided by the driver
4392 	 * and it needs to be read by the firmware
4393 	 */
4394 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4395 
4396 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
4397 		u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4398 
4399 		/* If the num of queues is even, add 2 bytes of padding */
4400 		if ((item->num_qs % 2) == 0)
4401 			item_size += 2;
4402 
4403 		sz += item_size;
4404 
4405 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4406 	}
4407 
4408 	if (buf_size != sz)
4409 		return ICE_ERR_PARAM;
4410 
4411 do_aq:
4412 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4413 	if (status) {
4414 		if (!qg_list)
4415 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4416 				  vmvf_num, hw->adminq.sq_last_status);
4417 		else
4418 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4419 				  LE16_TO_CPU(qg_list[0].q_id[0]),
4420 				  hw->adminq.sq_last_status);
4421 	}
4422 	return status;
4423 }
4424 
4425 /**
4426  * ice_aq_move_recfg_lan_txq
4427  * @hw: pointer to the hardware structure
4428  * @num_qs: number of queues to move/reconfigure
4429  * @is_move: true if this operation involves node movement
4430  * @is_tc_change: true if this operation involves a TC change
4431  * @subseq_call: true if this operation is a subsequent call
4432  * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4433  * @timeout: timeout in units of 100 usec (valid values 0-50)
4434  * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4435  * @buf: struct containing src/dest TEID and per-queue info
4436  * @buf_size: size of buffer for indirect command
4437  * @txqs_moved: out param, number of queues successfully moved
4438  * @cd: pointer to command details structure or NULL
4439  *
4440  * Move / Reconfigure Tx LAN queues (0x0C32)
4441  */
4442 enum ice_status
4443 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4444 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
4445 			  u8 timeout, u32 *blocked_cgds,
4446 			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4447 			  u8 *txqs_moved, struct ice_sq_cd *cd)
4448 {
4449 	struct ice_aqc_move_txqs *cmd;
4450 	struct ice_aq_desc desc;
4451 	enum ice_status status;
4452 
4453 	cmd = &desc.params.move_txqs;
4454 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4455 
4456 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4457 	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4458 		return ICE_ERR_PARAM;
4459 
4460 	if (is_tc_change && !flush_pipe && !blocked_cgds)
4461 		return ICE_ERR_PARAM;
4462 
4463 	if (!is_move && !is_tc_change)
4464 		return ICE_ERR_PARAM;
4465 
4466 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4467 
4468 	if (is_move)
4469 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4470 
4471 	if (is_tc_change)
4472 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4473 
4474 	if (subseq_call)
4475 		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4476 
4477 	if (flush_pipe)
4478 		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4479 
4480 	cmd->num_qs = num_qs;
4481 	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4482 			ICE_AQC_Q_CMD_TIMEOUT_M);
4483 
4484 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4485 
4486 	if (!status && txqs_moved)
4487 		*txqs_moved = cmd->num_qs;
4488 
4489 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4490 	    is_tc_change && !flush_pipe)
4491 		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4492 
4493 	return status;
4494 }
4495 
4496 /**
4497  * ice_aq_add_rdma_qsets
4498  * @hw: pointer to the hardware structure
4499  * @num_qset_grps: Number of RDMA Qset groups
4500  * @qset_list: list of qset groups to be added
4501  * @buf_size: size of buffer for indirect command
4502  * @cd: pointer to command details structure or NULL
4503  *
4504  * Add Tx RDMA Qsets (0x0C33)
4505  */
4506 enum ice_status
4507 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4508 		      struct ice_aqc_add_rdma_qset_data *qset_list,
4509 		      u16 buf_size, struct ice_sq_cd *cd)
4510 {
4511 	struct ice_aqc_add_rdma_qset_data *list;
4512 	struct ice_aqc_add_rdma_qset *cmd;
4513 	struct ice_aq_desc desc;
4514 	u16 i, sum_size = 0;
4515 
4516 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4517 
4518 	cmd = &desc.params.add_rdma_qset;
4519 
4520 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4521 
4522 	if (!qset_list)
4523 		return ICE_ERR_PARAM;
4524 
4525 	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4526 		return ICE_ERR_PARAM;
4527 
4528 	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4529 		u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4530 
4531 		sum_size += ice_struct_size(list, rdma_qsets, num_qsets);
4532 		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4533 							     num_qsets);
4534 	}
4535 
4536 	if (buf_size != sum_size)
4537 		return ICE_ERR_PARAM;
4538 
4539 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4540 
4541 	cmd->num_qset_grps = num_qset_grps;
4542 
4543 	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4544 }
4545 
4546 /* End of FW Admin Queue command wrappers */
4547 
4548 /**
4549  * ice_write_byte - write a byte to a packed context structure
4550  * @src_ctx:  the context structure to read from
4551  * @dest_ctx: the context to be written to
4552  * @ce_info:  a description of the struct to be filled
4553  */
4554 static void
4555 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4556 {
4557 	u8 src_byte, dest_byte, mask;
4558 	u8 *from, *dest;
4559 	u16 shift_width;
4560 
4561 	/* copy from the next struct field */
4562 	from = src_ctx + ce_info->offset;
4563 
4564 	/* prepare the bits and mask */
4565 	shift_width = ce_info->lsb % 8;
4566 	mask = (u8)(BIT(ce_info->width) - 1);
4567 
4568 	src_byte = *from;
4569 	src_byte &= mask;
4570 
4571 	/* shift to correct alignment */
4572 	mask <<= shift_width;
4573 	src_byte <<= shift_width;
4574 
4575 	/* get the current bits from the target bit string */
4576 	dest = dest_ctx + (ce_info->lsb / 8);
4577 
4578 	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4579 
4580 	dest_byte &= ~mask;	/* get the bits not changing */
4581 	dest_byte |= src_byte;	/* add in the new bits */
4582 
4583 	/* put it all back */
4584 	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4585 }
4586 
4587 /**
4588  * ice_write_word - write a word to a packed context structure
4589  * @src_ctx:  the context structure to read from
4590  * @dest_ctx: the context to be written to
4591  * @ce_info:  a description of the struct to be filled
4592  */
4593 static void
4594 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4595 {
4596 	u16 src_word, mask;
4597 	__le16 dest_word;
4598 	u8 *from, *dest;
4599 	u16 shift_width;
4600 
4601 	/* copy from the next struct field */
4602 	from = src_ctx + ce_info->offset;
4603 
4604 	/* prepare the bits and mask */
4605 	shift_width = ce_info->lsb % 8;
4606 	mask = BIT(ce_info->width) - 1;
4607 
4608 	/* don't swizzle the bits until after the mask because the mask bits
4609 	 * will be in a different bit position on big endian machines
4610 	 */
4611 	src_word = *(u16 *)from;
4612 	src_word &= mask;
4613 
4614 	/* shift to correct alignment */
4615 	mask <<= shift_width;
4616 	src_word <<= shift_width;
4617 
4618 	/* get the current bits from the target bit string */
4619 	dest = dest_ctx + (ce_info->lsb / 8);
4620 
4621 	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4622 
4623 	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
4624 	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
4625 
4626 	/* put it all back */
4627 	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4628 }
4629 
4630 /**
4631  * ice_write_dword - write a dword to a packed context structure
4632  * @src_ctx:  the context structure to read from
4633  * @dest_ctx: the context to be written to
4634  * @ce_info:  a description of the struct to be filled
4635  */
4636 static void
4637 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4638 {
4639 	u32 src_dword, mask;
4640 	__le32 dest_dword;
4641 	u8 *from, *dest;
4642 	u16 shift_width;
4643 
4644 	/* copy from the next struct field */
4645 	from = src_ctx + ce_info->offset;
4646 
4647 	/* prepare the bits and mask */
4648 	shift_width = ce_info->lsb % 8;
4649 
4650 	/* if the field width is exactly 32 on an x86 machine, then the shift
4651 	 * operation will not work because the SHL instructions count is masked
4652 	 * to 5 bits so the shift will do nothing
4653 	 */
4654 	if (ce_info->width < 32)
4655 		mask = BIT(ce_info->width) - 1;
4656 	else
4657 		mask = (u32)~0;
4658 
4659 	/* don't swizzle the bits until after the mask because the mask bits
4660 	 * will be in a different bit position on big endian machines
4661 	 */
4662 	src_dword = *(u32 *)from;
4663 	src_dword &= mask;
4664 
4665 	/* shift to correct alignment */
4666 	mask <<= shift_width;
4667 	src_dword <<= shift_width;
4668 
4669 	/* get the current bits from the target bit string */
4670 	dest = dest_ctx + (ce_info->lsb / 8);
4671 
4672 	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4673 
4674 	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
4675 	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
4676 
4677 	/* put it all back */
4678 	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4679 }
4680 
4681 /**
4682  * ice_write_qword - write a qword to a packed context structure
4683  * @src_ctx:  the context structure to read from
4684  * @dest_ctx: the context to be written to
4685  * @ce_info:  a description of the struct to be filled
4686  */
4687 static void
4688 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4689 {
4690 	u64 src_qword, mask;
4691 	__le64 dest_qword;
4692 	u8 *from, *dest;
4693 	u16 shift_width;
4694 
4695 	/* copy from the next struct field */
4696 	from = src_ctx + ce_info->offset;
4697 
4698 	/* prepare the bits and mask */
4699 	shift_width = ce_info->lsb % 8;
4700 
4701 	/* if the field width is exactly 64 on an x86 machine, then the shift
4702 	 * operation will not work because the SHL instructions count is masked
4703 	 * to 6 bits so the shift will do nothing
4704 	 */
4705 	if (ce_info->width < 64)
4706 		mask = BIT_ULL(ce_info->width) - 1;
4707 	else
4708 		mask = (u64)~0;
4709 
4710 	/* don't swizzle the bits until after the mask because the mask bits
4711 	 * will be in a different bit position on big endian machines
4712 	 */
4713 	src_qword = *(u64 *)from;
4714 	src_qword &= mask;
4715 
4716 	/* shift to correct alignment */
4717 	mask <<= shift_width;
4718 	src_qword <<= shift_width;
4719 
4720 	/* get the current bits from the target bit string */
4721 	dest = dest_ctx + (ce_info->lsb / 8);
4722 
4723 	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4724 
4725 	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
4726 	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
4727 
4728 	/* put it all back */
4729 	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4730 }
4731 
4732 /**
4733  * ice_set_ctx - set context bits in packed structure
4734  * @hw: pointer to the hardware structure
4735  * @src_ctx:  pointer to a generic non-packed context structure
4736  * @dest_ctx: pointer to memory for the packed structure
4737  * @ce_info:  a description of the structure to be transformed
4738  */
4739 enum ice_status
4740 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4741 	    const struct ice_ctx_ele *ce_info)
4742 {
4743 	int f;
4744 
4745 	for (f = 0; ce_info[f].width; f++) {
4746 		/* We have to deal with each element of the FW response
4747 		 * using the correct size so that we are correct regardless
4748 		 * of the endianness of the machine.
4749 		 */
4750 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4751 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4752 				  f, ce_info[f].width, ce_info[f].size_of);
4753 			continue;
4754 		}
4755 		switch (ce_info[f].size_of) {
4756 		case sizeof(u8):
4757 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4758 			break;
4759 		case sizeof(u16):
4760 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4761 			break;
4762 		case sizeof(u32):
4763 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4764 			break;
4765 		case sizeof(u64):
4766 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4767 			break;
4768 		default:
4769 			return ICE_ERR_INVAL_SIZE;
4770 		}
4771 	}
4772 
4773 	return ICE_SUCCESS;
4774 }
4775 
4776 /**
4777  * ice_aq_get_internal_data
4778  * @hw: pointer to the hardware structure
4779  * @cluster_id: specific cluster to dump
4780  * @table_id: table ID within cluster
4781  * @start: index of line in the block to read
4782  * @buf: dump buffer
4783  * @buf_size: dump buffer size
4784  * @ret_buf_size: return buffer size (returned by FW)
4785  * @ret_next_table: next block to read (returned by FW)
4786  * @ret_next_index: next index to read (returned by FW)
4787  * @cd: pointer to command details structure
4788  *
4789  * Get internal FW/HW data (0xFF08) for debug purposes.
4790  */
4791 enum ice_status
4792 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
4793 			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
4794 			 u16 *ret_next_table, u32 *ret_next_index,
4795 			 struct ice_sq_cd *cd)
4796 {
4797 	struct ice_aqc_debug_dump_internals *cmd;
4798 	struct ice_aq_desc desc;
4799 	enum ice_status status;
4800 
4801 	cmd = &desc.params.debug_dump;
4802 
4803 	if (buf_size == 0 || !buf)
4804 		return ICE_ERR_PARAM;
4805 
4806 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
4807 
4808 	cmd->cluster_id = cluster_id;
4809 	cmd->table_id = CPU_TO_LE16(table_id);
4810 	cmd->idx = CPU_TO_LE32(start);
4811 
4812 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4813 
4814 	if (!status) {
4815 		if (ret_buf_size)
4816 			*ret_buf_size = LE16_TO_CPU(desc.datalen);
4817 		if (ret_next_table)
4818 			*ret_next_table = LE16_TO_CPU(cmd->table_id);
4819 		if (ret_next_index)
4820 			*ret_next_index = LE32_TO_CPU(cmd->idx);
4821 	}
4822 
4823 	return status;
4824 }
4825 
4826 /**
4827  * ice_read_byte - read context byte into struct
4828  * @src_ctx:  the context structure to read from
4829  * @dest_ctx: the context to be written to
4830  * @ce_info:  a description of the struct to be filled
4831  */
4832 static void
4833 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4834 {
4835 	u8 dest_byte, mask;
4836 	u8 *src, *target;
4837 	u16 shift_width;
4838 
4839 	/* prepare the bits and mask */
4840 	shift_width = ce_info->lsb % 8;
4841 	mask = (u8)(BIT(ce_info->width) - 1);
4842 
4843 	/* shift to correct alignment */
4844 	mask <<= shift_width;
4845 
4846 	/* get the current bits from the src bit string */
4847 	src = src_ctx + (ce_info->lsb / 8);
4848 
4849 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4850 
4851 	dest_byte &= mask;
4852 
4853 	dest_byte >>= shift_width;
4854 
4855 	/* get the address from the struct field */
4856 	target = dest_ctx + ce_info->offset;
4857 
4858 	/* put it back in the struct */
4859 	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4860 }
4861 
4862 /**
4863  * ice_read_word - read context word into struct
4864  * @src_ctx:  the context structure to read from
4865  * @dest_ctx: the context to be written to
4866  * @ce_info:  a description of the struct to be filled
4867  */
4868 static void
4869 ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4870 {
4871 	u16 dest_word, mask;
4872 	u8 *src, *target;
4873 	__le16 src_word;
4874 	u16 shift_width;
4875 
4876 	/* prepare the bits and mask */
4877 	shift_width = ce_info->lsb % 8;
4878 	mask = BIT(ce_info->width) - 1;
4879 
4880 	/* shift to correct alignment */
4881 	mask <<= shift_width;
4882 
4883 	/* get the current bits from the src bit string */
4884 	src = src_ctx + (ce_info->lsb / 8);
4885 
4886 	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4887 
4888 	/* the data in the memory is stored as little endian so mask it
4889 	 * correctly
4890 	 */
4891 	src_word &= CPU_TO_LE16(mask);
4892 
4893 	/* get the data back into host order before shifting */
4894 	dest_word = LE16_TO_CPU(src_word);
4895 
4896 	dest_word >>= shift_width;
4897 
4898 	/* get the address from the struct field */
4899 	target = dest_ctx + ce_info->offset;
4900 
4901 	/* put it back in the struct */
4902 	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4903 }
4904 
4905 /**
4906  * ice_read_dword - read context dword into struct
4907  * @src_ctx:  the context structure to read from
4908  * @dest_ctx: the context to be written to
4909  * @ce_info:  a description of the struct to be filled
4910  */
4911 static void
4912 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4913 {
4914 	u32 dest_dword, mask;
4915 	__le32 src_dword;
4916 	u8 *src, *target;
4917 	u16 shift_width;
4918 
4919 	/* prepare the bits and mask */
4920 	shift_width = ce_info->lsb % 8;
4921 
4922 	/* if the field width is exactly 32 on an x86 machine, then the shift
4923 	 * operation will not work because the SHL instructions count is masked
4924 	 * to 5 bits so the shift will do nothing
4925 	 */
4926 	if (ce_info->width < 32)
4927 		mask = BIT(ce_info->width) - 1;
4928 	else
4929 		mask = (u32)~0;
4930 
4931 	/* shift to correct alignment */
4932 	mask <<= shift_width;
4933 
4934 	/* get the current bits from the src bit string */
4935 	src = src_ctx + (ce_info->lsb / 8);
4936 
4937 	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4938 
4939 	/* the data in the memory is stored as little endian so mask it
4940 	 * correctly
4941 	 */
4942 	src_dword &= CPU_TO_LE32(mask);
4943 
4944 	/* get the data back into host order before shifting */
4945 	dest_dword = LE32_TO_CPU(src_dword);
4946 
4947 	dest_dword >>= shift_width;
4948 
4949 	/* get the address from the struct field */
4950 	target = dest_ctx + ce_info->offset;
4951 
4952 	/* put it back in the struct */
4953 	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4954 }
4955 
4956 /**
4957  * ice_read_qword - read context qword into struct
4958  * @src_ctx:  the context structure to read from
4959  * @dest_ctx: the context to be written to
4960  * @ce_info:  a description of the struct to be filled
4961  */
4962 static void
4963 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4964 {
4965 	u64 dest_qword, mask;
4966 	__le64 src_qword;
4967 	u8 *src, *target;
4968 	u16 shift_width;
4969 
4970 	/* prepare the bits and mask */
4971 	shift_width = ce_info->lsb % 8;
4972 
4973 	/* if the field width is exactly 64 on an x86 machine, then the shift
4974 	 * operation will not work because the SHL instructions count is masked
4975 	 * to 6 bits so the shift will do nothing
4976 	 */
4977 	if (ce_info->width < 64)
4978 		mask = BIT_ULL(ce_info->width) - 1;
4979 	else
4980 		mask = (u64)~0;
4981 
4982 	/* shift to correct alignment */
4983 	mask <<= shift_width;
4984 
4985 	/* get the current bits from the src bit string */
4986 	src = src_ctx + (ce_info->lsb / 8);
4987 
4988 	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4989 
4990 	/* the data in the memory is stored as little endian so mask it
4991 	 * correctly
4992 	 */
4993 	src_qword &= CPU_TO_LE64(mask);
4994 
4995 	/* get the data back into host order before shifting */
4996 	dest_qword = LE64_TO_CPU(src_qword);
4997 
4998 	dest_qword >>= shift_width;
4999 
5000 	/* get the address from the struct field */
5001 	target = dest_ctx + ce_info->offset;
5002 
5003 	/* put it back in the struct */
5004 	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
5005 }
5006 
5007 /**
5008  * ice_get_ctx - extract context bits from a packed structure
5009  * @src_ctx:  pointer to a generic packed context structure
5010  * @dest_ctx: pointer to a generic non-packed context structure
5011  * @ce_info:  a description of the structure to be read from
5012  */
5013 enum ice_status
5014 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5015 {
5016 	int f;
5017 
5018 	for (f = 0; ce_info[f].width; f++) {
5019 		switch (ce_info[f].size_of) {
5020 		case 1:
5021 			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
5022 			break;
5023 		case 2:
5024 			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
5025 			break;
5026 		case 4:
5027 			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
5028 			break;
5029 		case 8:
5030 			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
5031 			break;
5032 		default:
5033 			/* nothing to do, just keep going */
5034 			break;
5035 		}
5036 	}
5037 
5038 	return ICE_SUCCESS;
5039 }
5040 
5041 /**
5042  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
5043  * @hw: pointer to the HW struct
5044  * @vsi_handle: software VSI handle
5045  * @tc: TC number
5046  * @q_handle: software queue handle
5047  */
5048 struct ice_q_ctx *
5049 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
5050 {
5051 	struct ice_vsi_ctx *vsi;
5052 	struct ice_q_ctx *q_ctx;
5053 
5054 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
5055 	if (!vsi)
5056 		return NULL;
5057 	if (q_handle >= vsi->num_lan_q_entries[tc])
5058 		return NULL;
5059 	if (!vsi->lan_q_ctx[tc])
5060 		return NULL;
5061 	q_ctx = vsi->lan_q_ctx[tc];
5062 	return &q_ctx[q_handle];
5063 }
5064 
5065 /**
5066  * ice_ena_vsi_txq
5067  * @pi: port information structure
5068  * @vsi_handle: software VSI handle
5069  * @tc: TC number
5070  * @q_handle: software queue handle
5071  * @num_qgrps: Number of added queue groups
5072  * @buf: list of queue groups to be added
5073  * @buf_size: size of buffer for indirect command
5074  * @cd: pointer to command details structure or NULL
5075  *
5076  * This function adds one LAN queue
5077  */
5078 enum ice_status
5079 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
5080 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
5081 		struct ice_sq_cd *cd)
5082 {
5083 	struct ice_aqc_txsched_elem_data node = { 0 };
5084 	struct ice_sched_node *parent;
5085 	struct ice_q_ctx *q_ctx;
5086 	enum ice_status status;
5087 	struct ice_hw *hw;
5088 
5089 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5090 		return ICE_ERR_CFG;
5091 
5092 	if (num_qgrps > 1 || buf->num_txqs > 1)
5093 		return ICE_ERR_MAX_LIMIT;
5094 
5095 	hw = pi->hw;
5096 
5097 	if (!ice_is_vsi_valid(hw, vsi_handle))
5098 		return ICE_ERR_PARAM;
5099 
5100 	ice_acquire_lock(&pi->sched_lock);
5101 
5102 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5103 	if (!q_ctx) {
5104 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5105 			  q_handle);
5106 		status = ICE_ERR_PARAM;
5107 		goto ena_txq_exit;
5108 	}
5109 
5110 	/* find a parent node */
5111 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5112 					    ICE_SCHED_NODE_OWNER_LAN);
5113 	if (!parent) {
5114 		status = ICE_ERR_PARAM;
5115 		goto ena_txq_exit;
5116 	}
5117 
5118 	buf->parent_teid = parent->info.node_teid;
5119 	node.parent_teid = parent->info.node_teid;
5120 	/* Mark that the values in the "generic" section as valid. The default
5121 	 * value in the "generic" section is zero. This means that :
5122 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
5123 	 * - 0 priority among siblings, indicated by Bit 1-3.
5124 	 * - WFQ, indicated by Bit 4.
5125 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
5126 	 * Bit 5-6.
5127 	 * - Bit 7 is reserved.
5128 	 * Without setting the generic section as valid in valid_sections, the
5129 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
5130 	 */
5131 	buf->txqs[0].info.valid_sections =
5132 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5133 		ICE_AQC_ELEM_VALID_EIR;
5134 	buf->txqs[0].info.generic = 0;
5135 	buf->txqs[0].info.cir_bw.bw_profile_idx =
5136 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5137 	buf->txqs[0].info.cir_bw.bw_alloc =
5138 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5139 	buf->txqs[0].info.eir_bw.bw_profile_idx =
5140 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5141 	buf->txqs[0].info.eir_bw.bw_alloc =
5142 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5143 
5144 	/* add the LAN queue */
5145 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5146 	if (status != ICE_SUCCESS) {
5147 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5148 			  LE16_TO_CPU(buf->txqs[0].txq_id),
5149 			  hw->adminq.sq_last_status);
5150 		goto ena_txq_exit;
5151 	}
5152 
5153 	node.node_teid = buf->txqs[0].q_teid;
5154 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5155 	q_ctx->q_handle = q_handle;
5156 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5157 
5158 	/* add a leaf node into scheduler tree queue layer */
5159 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
5160 	if (!status)
5161 		status = ice_sched_replay_q_bw(pi, q_ctx);
5162 
5163 ena_txq_exit:
5164 	ice_release_lock(&pi->sched_lock);
5165 	return status;
5166 }
5167 
5168 /**
5169  * ice_dis_vsi_txq
5170  * @pi: port information structure
5171  * @vsi_handle: software VSI handle
5172  * @tc: TC number
5173  * @num_queues: number of queues
5174  * @q_handles: pointer to software queue handle array
5175  * @q_ids: pointer to the q_id array
5176  * @q_teids: pointer to queue node teids
5177  * @rst_src: if called due to reset, specifies the reset source
5178  * @vmvf_num: the relative VM or VF number that is undergoing the reset
5179  * @cd: pointer to command details structure or NULL
5180  *
5181  * This function removes queues and their corresponding nodes in SW DB
5182  */
5183 enum ice_status
5184 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5185 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
5186 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
5187 		struct ice_sq_cd *cd)
5188 {
5189 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5190 	struct ice_aqc_dis_txq_item *qg_list;
5191 	struct ice_q_ctx *q_ctx;
5192 	struct ice_hw *hw;
5193 	u16 i, buf_size;
5194 
5195 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5196 		return ICE_ERR_CFG;
5197 
5198 	hw = pi->hw;
5199 
5200 	if (!num_queues) {
5201 		/* if queue is disabled already yet the disable queue command
5202 		 * has to be sent to complete the VF reset, then call
5203 		 * ice_aq_dis_lan_txq without any queue information
5204 		 */
5205 		if (rst_src)
5206 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5207 						  vmvf_num, NULL);
5208 		return ICE_ERR_CFG;
5209 	}
5210 
5211 	buf_size = ice_struct_size(qg_list, q_id, 1);
5212 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5213 	if (!qg_list)
5214 		return ICE_ERR_NO_MEMORY;
5215 
5216 	ice_acquire_lock(&pi->sched_lock);
5217 
5218 	for (i = 0; i < num_queues; i++) {
5219 		struct ice_sched_node *node;
5220 
5221 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5222 		if (!node)
5223 			continue;
5224 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5225 		if (!q_ctx) {
5226 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
5227 				  q_handles[i]);
5228 			continue;
5229 		}
5230 		if (q_ctx->q_handle != q_handles[i]) {
5231 			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5232 				  q_ctx->q_handle, q_handles[i]);
5233 			continue;
5234 		}
5235 		qg_list->parent_teid = node->info.parent_teid;
5236 		qg_list->num_qs = 1;
5237 		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5238 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5239 					    vmvf_num, cd);
5240 
5241 		if (status != ICE_SUCCESS)
5242 			break;
5243 		ice_free_sched_node(pi, node);
5244 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5245 	}
5246 	ice_release_lock(&pi->sched_lock);
5247 	ice_free(hw, qg_list);
5248 	return status;
5249 }
5250 
5251 /**
5252  * ice_cfg_vsi_qs - configure the new/existing VSI queues
5253  * @pi: port information structure
5254  * @vsi_handle: software VSI handle
5255  * @tc_bitmap: TC bitmap
5256  * @maxqs: max queues array per TC
5257  * @owner: LAN or RDMA
5258  *
5259  * This function adds/updates the VSI queues per TC.
5260  */
5261 static enum ice_status
5262 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5263 	       u16 *maxqs, u8 owner)
5264 {
5265 	enum ice_status status = ICE_SUCCESS;
5266 	u8 i;
5267 
5268 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5269 		return ICE_ERR_CFG;
5270 
5271 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5272 		return ICE_ERR_PARAM;
5273 
5274 	ice_acquire_lock(&pi->sched_lock);
5275 
5276 	ice_for_each_traffic_class(i) {
5277 		/* configuration is possible only if TC node is present */
5278 		if (!ice_sched_get_tc_node(pi, i))
5279 			continue;
5280 
5281 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5282 					   ice_is_tc_ena(tc_bitmap, i));
5283 		if (status)
5284 			break;
5285 	}
5286 
5287 	ice_release_lock(&pi->sched_lock);
5288 	return status;
5289 }
5290 
5291 /**
5292  * ice_cfg_vsi_lan - configure VSI LAN queues
5293  * @pi: port information structure
5294  * @vsi_handle: software VSI handle
5295  * @tc_bitmap: TC bitmap
5296  * @max_lanqs: max LAN queues array per TC
5297  *
5298  * This function adds/updates the VSI LAN queues per TC.
5299  */
5300 enum ice_status
5301 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5302 		u16 *max_lanqs)
5303 {
5304 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5305 			      ICE_SCHED_NODE_OWNER_LAN);
5306 }
5307 
5308 /**
5309  * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5310  * @pi: port information structure
5311  * @vsi_handle: software VSI handle
5312  * @tc_bitmap: TC bitmap
5313  * @max_rdmaqs: max RDMA queues array per TC
5314  *
5315  * This function adds/updates the VSI RDMA queues per TC.
5316  */
5317 enum ice_status
5318 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5319 		 u16 *max_rdmaqs)
5320 {
5321 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5322 			      ICE_SCHED_NODE_OWNER_RDMA);
5323 }
5324 
5325 /**
5326  * ice_ena_vsi_rdma_qset
5327  * @pi: port information structure
5328  * @vsi_handle: software VSI handle
5329  * @tc: TC number
5330  * @rdma_qset: pointer to RDMA qset
5331  * @num_qsets: number of RDMA qsets
5332  * @qset_teid: pointer to qset node teids
5333  *
5334  * This function adds RDMA qset
5335  */
5336 enum ice_status
5337 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5338 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5339 {
5340 	struct ice_aqc_txsched_elem_data node = { 0 };
5341 	struct ice_aqc_add_rdma_qset_data *buf;
5342 	struct ice_sched_node *parent;
5343 	enum ice_status status;
5344 	struct ice_hw *hw;
5345 	u16 i, buf_size;
5346 
5347 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5348 		return ICE_ERR_CFG;
5349 	hw = pi->hw;
5350 
5351 	if (!ice_is_vsi_valid(hw, vsi_handle))
5352 		return ICE_ERR_PARAM;
5353 
5354 	buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5355 	buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5356 	if (!buf)
5357 		return ICE_ERR_NO_MEMORY;
5358 	ice_acquire_lock(&pi->sched_lock);
5359 
5360 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5361 					    ICE_SCHED_NODE_OWNER_RDMA);
5362 	if (!parent) {
5363 		status = ICE_ERR_PARAM;
5364 		goto rdma_error_exit;
5365 	}
5366 	buf->parent_teid = parent->info.node_teid;
5367 	node.parent_teid = parent->info.node_teid;
5368 
5369 	buf->num_qsets = CPU_TO_LE16(num_qsets);
5370 	for (i = 0; i < num_qsets; i++) {
5371 		buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5372 		buf->rdma_qsets[i].info.valid_sections =
5373 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5374 			ICE_AQC_ELEM_VALID_EIR;
5375 		buf->rdma_qsets[i].info.generic = 0;
5376 		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5377 			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5378 		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5379 			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5380 		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5381 			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5382 		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5383 			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5384 	}
5385 	status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5386 	if (status != ICE_SUCCESS) {
5387 		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5388 		goto rdma_error_exit;
5389 	}
5390 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5391 	for (i = 0; i < num_qsets; i++) {
5392 		node.node_teid = buf->rdma_qsets[i].qset_teid;
5393 		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5394 					    &node);
5395 		if (status)
5396 			break;
5397 		qset_teid[i] = LE32_TO_CPU(node.node_teid);
5398 	}
5399 rdma_error_exit:
5400 	ice_release_lock(&pi->sched_lock);
5401 	ice_free(hw, buf);
5402 	return status;
5403 }
5404 
5405 /**
5406  * ice_dis_vsi_rdma_qset - free RDMA resources
5407  * @pi: port_info struct
5408  * @count: number of RDMA qsets to free
5409  * @qset_teid: TEID of qset node
5410  * @q_id: list of queue IDs being disabled
5411  */
5412 enum ice_status
5413 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5414 		      u16 *q_id)
5415 {
5416 	struct ice_aqc_dis_txq_item *qg_list;
5417 	enum ice_status status = ICE_SUCCESS;
5418 	struct ice_hw *hw;
5419 	u16 qg_size;
5420 	int i;
5421 
5422 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5423 		return ICE_ERR_CFG;
5424 
5425 	hw = pi->hw;
5426 
5427 	qg_size = ice_struct_size(qg_list, q_id, 1);
5428 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5429 	if (!qg_list)
5430 		return ICE_ERR_NO_MEMORY;
5431 
5432 	ice_acquire_lock(&pi->sched_lock);
5433 
5434 	for (i = 0; i < count; i++) {
5435 		struct ice_sched_node *node;
5436 
5437 		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5438 		if (!node)
5439 			continue;
5440 
5441 		qg_list->parent_teid = node->info.parent_teid;
5442 		qg_list->num_qs = 1;
5443 		qg_list->q_id[0] =
5444 			CPU_TO_LE16(q_id[i] |
5445 				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5446 
5447 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5448 					    ICE_NO_RESET, 0, NULL);
5449 		if (status)
5450 			break;
5451 
5452 		ice_free_sched_node(pi, node);
5453 	}
5454 
5455 	ice_release_lock(&pi->sched_lock);
5456 	ice_free(hw, qg_list);
5457 	return status;
5458 }
5459 
5460 /**
5461  * ice_is_main_vsi - checks whether the VSI is main VSI
5462  * @hw: pointer to the HW struct
5463  * @vsi_handle: VSI handle
5464  *
5465  * Checks whether the VSI is the main VSI (the first PF VSI created on
5466  * given PF).
5467  */
5468 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5469 {
5470 	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5471 }
5472 
5473 /**
5474  * ice_replay_pre_init - replay pre initialization
5475  * @hw: pointer to the HW struct
5476  * @sw: pointer to switch info struct for which function initializes filters
5477  *
5478  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5479  */
5480 enum ice_status
5481 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5482 {
5483 	enum ice_status status;
5484 	u8 i;
5485 
5486 	/* Delete old entries from replay filter list head if there is any */
5487 	ice_rm_sw_replay_rule_info(hw, sw);
5488 	/* In start of replay, move entries into replay_rules list, it
5489 	 * will allow adding rules entries back to filt_rules list,
5490 	 * which is operational list.
5491 	 */
5492 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5493 		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5494 				  &sw->recp_list[i].filt_replay_rules);
5495 	ice_sched_replay_agg_vsi_preinit(hw);
5496 
5497 	status = ice_sched_replay_root_node_bw(hw->port_info);
5498 	if (status)
5499 		return status;
5500 
5501 	return ice_sched_replay_tc_node_bw(hw->port_info);
5502 }
5503 
5504 /**
5505  * ice_replay_vsi - replay VSI configuration
5506  * @hw: pointer to the HW struct
5507  * @vsi_handle: driver VSI handle
5508  *
5509  * Restore all VSI configuration after reset. It is required to call this
5510  * function with main VSI first.
5511  */
5512 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5513 {
5514 	struct ice_switch_info *sw = hw->switch_info;
5515 	struct ice_port_info *pi = hw->port_info;
5516 	enum ice_status status;
5517 
5518 	if (!ice_is_vsi_valid(hw, vsi_handle))
5519 		return ICE_ERR_PARAM;
5520 
5521 	/* Replay pre-initialization if there is any */
5522 	if (ice_is_main_vsi(hw, vsi_handle)) {
5523 		status = ice_replay_pre_init(hw, sw);
5524 		if (status)
5525 			return status;
5526 	}
5527 	/* Replay per VSI all RSS configurations */
5528 	status = ice_replay_rss_cfg(hw, vsi_handle);
5529 	if (status)
5530 		return status;
5531 	/* Replay per VSI all filters */
5532 	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5533 	if (!status)
5534 		status = ice_replay_vsi_agg(hw, vsi_handle);
5535 	return status;
5536 }
5537 
5538 /**
5539  * ice_replay_post - post replay configuration cleanup
5540  * @hw: pointer to the HW struct
5541  *
5542  * Post replay cleanup.
5543  */
5544 void ice_replay_post(struct ice_hw *hw)
5545 {
5546 	/* Delete old entries from replay filter list head */
5547 	ice_rm_all_sw_replay_rule_info(hw);
5548 	ice_sched_replay_agg(hw);
5549 }
5550 
5551 /**
5552  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5553  * @hw: ptr to the hardware info
5554  * @reg: offset of 64 bit HW register to read from
5555  * @prev_stat_loaded: bool to specify if previous stats are loaded
5556  * @prev_stat: ptr to previous loaded stat value
5557  * @cur_stat: ptr to current stat value
5558  */
5559 void
5560 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5561 		  u64 *prev_stat, u64 *cur_stat)
5562 {
5563 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5564 
5565 	/* device stats are not reset at PFR, they likely will not be zeroed
5566 	 * when the driver starts. Thus, save the value from the first read
5567 	 * without adding to the statistic value so that we report stats which
5568 	 * count up from zero.
5569 	 */
5570 	if (!prev_stat_loaded) {
5571 		*prev_stat = new_data;
5572 		return;
5573 	}
5574 
5575 	/* Calculate the difference between the new and old values, and then
5576 	 * add it to the software stat value.
5577 	 */
5578 	if (new_data >= *prev_stat)
5579 		*cur_stat += new_data - *prev_stat;
5580 	else
5581 		/* to manage the potential roll-over */
5582 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5583 
5584 	/* Update the previously stored value to prepare for next read */
5585 	*prev_stat = new_data;
5586 }
5587 
5588 /**
5589  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5590  * @hw: ptr to the hardware info
5591  * @reg: offset of HW register to read from
5592  * @prev_stat_loaded: bool to specify if previous stats are loaded
5593  * @prev_stat: ptr to previous loaded stat value
5594  * @cur_stat: ptr to current stat value
5595  */
5596 void
5597 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5598 		  u64 *prev_stat, u64 *cur_stat)
5599 {
5600 	u32 new_data;
5601 
5602 	new_data = rd32(hw, reg);
5603 
5604 	/* device stats are not reset at PFR, they likely will not be zeroed
5605 	 * when the driver starts. Thus, save the value from the first read
5606 	 * without adding to the statistic value so that we report stats which
5607 	 * count up from zero.
5608 	 */
5609 	if (!prev_stat_loaded) {
5610 		*prev_stat = new_data;
5611 		return;
5612 	}
5613 
5614 	/* Calculate the difference between the new and old values, and then
5615 	 * add it to the software stat value.
5616 	 */
5617 	if (new_data >= *prev_stat)
5618 		*cur_stat += new_data - *prev_stat;
5619 	else
5620 		/* to manage the potential roll-over */
5621 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5622 
5623 	/* Update the previously stored value to prepare for next read */
5624 	*prev_stat = new_data;
5625 }
5626 
5627 /**
5628  * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5629  * @hw: ptr to the hardware info
5630  * @vsi_handle: VSI handle
5631  * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5632  * @cur_stats: ptr to current stats structure
5633  *
5634  * The GLV_REPC statistic register actually tracks two 16bit statistics, and
5635  * thus cannot be read using the normal ice_stat_update32 function.
5636  *
5637  * Read the GLV_REPC register associated with the given VSI, and update the
5638  * rx_no_desc and rx_error values in the ice_eth_stats structure.
5639  *
5640  * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5641  * cleared each time it's read.
5642  *
5643  * Note that the GLV_RDPC register also counts the causes that would trigger
5644  * GLV_REPC. However, it does not give the finer grained detail about why the
5645  * packets are being dropped. The GLV_REPC values can be used to distinguish
5646  * whether Rx packets are dropped due to errors or due to no available
5647  * descriptors.
5648  */
5649 void
5650 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5651 		     struct ice_eth_stats *cur_stats)
5652 {
5653 	u16 vsi_num, no_desc, error_cnt;
5654 	u32 repc;
5655 
5656 	if (!ice_is_vsi_valid(hw, vsi_handle))
5657 		return;
5658 
5659 	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5660 
5661 	/* If we haven't loaded stats yet, just clear the current value */
5662 	if (!prev_stat_loaded) {
5663 		wr32(hw, GLV_REPC(vsi_num), 0);
5664 		return;
5665 	}
5666 
5667 	repc = rd32(hw, GLV_REPC(vsi_num));
5668 	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5669 	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5670 
5671 	/* Clear the count by writing to the stats register */
5672 	wr32(hw, GLV_REPC(vsi_num), 0);
5673 
5674 	cur_stats->rx_no_desc += no_desc;
5675 	cur_stats->rx_errors += error_cnt;
5676 }
5677 
5678 /**
5679  * ice_aq_alternate_write
5680  * @hw: pointer to the hardware structure
5681  * @reg_addr0: address of first dword to be written
5682  * @reg_val0: value to be written under 'reg_addr0'
5683  * @reg_addr1: address of second dword to be written
5684  * @reg_val1: value to be written under 'reg_addr1'
5685  *
5686  * Write one or two dwords to alternate structure. Fields are indicated
5687  * by 'reg_addr0' and 'reg_addr1' register numbers.
5688  */
5689 enum ice_status
5690 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
5691 		       u32 reg_addr1, u32 reg_val1)
5692 {
5693 	struct ice_aqc_read_write_alt_direct *cmd;
5694 	struct ice_aq_desc desc;
5695 	enum ice_status status;
5696 
5697 	cmd = &desc.params.read_write_alt_direct;
5698 
5699 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
5700 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5701 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5702 	cmd->dword0_value = CPU_TO_LE32(reg_val0);
5703 	cmd->dword1_value = CPU_TO_LE32(reg_val1);
5704 
5705 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5706 
5707 	return status;
5708 }
5709 
5710 /**
5711  * ice_aq_alternate_read
5712  * @hw: pointer to the hardware structure
5713  * @reg_addr0: address of first dword to be read
5714  * @reg_val0: pointer for data read from 'reg_addr0'
5715  * @reg_addr1: address of second dword to be read
5716  * @reg_val1: pointer for data read from 'reg_addr1'
5717  *
5718  * Read one or two dwords from alternate structure. Fields are indicated
5719  * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
5720  * is not passed then only register at 'reg_addr0' is read.
5721  */
5722 enum ice_status
5723 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
5724 		      u32 reg_addr1, u32 *reg_val1)
5725 {
5726 	struct ice_aqc_read_write_alt_direct *cmd;
5727 	struct ice_aq_desc desc;
5728 	enum ice_status status;
5729 
5730 	cmd = &desc.params.read_write_alt_direct;
5731 
5732 	if (!reg_val0)
5733 		return ICE_ERR_PARAM;
5734 
5735 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
5736 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5737 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5738 
5739 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5740 
5741 	if (status == ICE_SUCCESS) {
5742 		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
5743 
5744 		if (reg_val1)
5745 			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
5746 	}
5747 
5748 	return status;
5749 }
5750 
5751 /**
5752  *  ice_aq_alternate_write_done
5753  *  @hw: pointer to the HW structure.
5754  *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
5755  *  @reset_needed: indicates the SW should trigger GLOBAL reset
5756  *
5757  *  Indicates to the FW that alternate structures have been changed.
5758  */
5759 enum ice_status
5760 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
5761 {
5762 	struct ice_aqc_done_alt_write *cmd;
5763 	struct ice_aq_desc desc;
5764 	enum ice_status status;
5765 
5766 	cmd = &desc.params.done_alt_write;
5767 
5768 	if (!reset_needed)
5769 		return ICE_ERR_PARAM;
5770 
5771 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
5772 	cmd->flags = bios_mode;
5773 
5774 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5775 	if (!status)
5776 		*reset_needed = (LE16_TO_CPU(cmd->flags) &
5777 				 ICE_AQC_RESP_RESET_NEEDED) != 0;
5778 
5779 	return status;
5780 }
5781 
5782 /**
5783  *  ice_aq_alternate_clear
5784  *  @hw: pointer to the HW structure.
5785  *
5786  *  Clear the alternate structures of the port from which the function
5787  *  is called.
5788  */
5789 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
5790 {
5791 	struct ice_aq_desc desc;
5792 	enum ice_status status;
5793 
5794 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
5795 
5796 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5797 
5798 	return status;
5799 }
5800 
5801 /**
5802  * ice_sched_query_elem - query element information from HW
5803  * @hw: pointer to the HW struct
5804  * @node_teid: node TEID to be queried
5805  * @buf: buffer to element information
5806  *
5807  * This function queries HW element information
5808  */
5809 enum ice_status
5810 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5811 		     struct ice_aqc_txsched_elem_data *buf)
5812 {
5813 	u16 buf_size, num_elem_ret = 0;
5814 	enum ice_status status;
5815 
5816 	buf_size = sizeof(*buf);
5817 	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5818 	buf->node_teid = CPU_TO_LE32(node_teid);
5819 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5820 					  NULL);
5821 	if (status != ICE_SUCCESS || num_elem_ret != 1)
5822 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5823 	return status;
5824 }
5825 
5826 /**
5827  * ice_get_fw_mode - returns FW mode
5828  * @hw: pointer to the HW struct
5829  */
5830 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5831 {
5832 #define ICE_FW_MODE_DBG_M BIT(0)
5833 #define ICE_FW_MODE_REC_M BIT(1)
5834 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5835 	u32 fw_mode;
5836 
5837 	/* check the current FW mode */
5838 	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5839 	if (fw_mode & ICE_FW_MODE_DBG_M)
5840 		return ICE_FW_MODE_DBG;
5841 	else if (fw_mode & ICE_FW_MODE_REC_M)
5842 		return ICE_FW_MODE_REC;
5843 	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5844 		return ICE_FW_MODE_ROLLBACK;
5845 	else
5846 		return ICE_FW_MODE_NORMAL;
5847 }
5848 
5849 /**
5850  * ice_get_cur_lldp_persist_status
5851  * @hw: pointer to the HW struct
5852  * @lldp_status: return value of LLDP persistent status
5853  *
5854  * Get the current status of LLDP persistent
5855  */
5856 enum ice_status
5857 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5858 {
5859 	struct ice_port_info *pi = hw->port_info;
5860 	enum ice_status ret;
5861 	__le32 raw_data;
5862 	u32 data, mask;
5863 
5864 	if (!lldp_status)
5865 		return ICE_ERR_BAD_PTR;
5866 
5867 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5868 	if (ret)
5869 		return ret;
5870 
5871 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
5872 			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
5873 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
5874 			      false, true, NULL);
5875 	if (!ret) {
5876 		data = LE32_TO_CPU(raw_data);
5877 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5878 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5879 		data = data & mask;
5880 		*lldp_status = data >>
5881 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5882 	}
5883 
5884 	ice_release_nvm(hw);
5885 
5886 	return ret;
5887 }
5888 
5889 /**
5890  * ice_get_dflt_lldp_persist_status
5891  * @hw: pointer to the HW struct
5892  * @lldp_status: return value of LLDP persistent status
5893  *
5894  * Get the default status of LLDP persistent
5895  */
5896 enum ice_status
5897 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5898 {
5899 	struct ice_port_info *pi = hw->port_info;
5900 	u32 data, mask, loc_data, loc_data_tmp;
5901 	enum ice_status ret;
5902 	__le16 loc_raw_data;
5903 	__le32 raw_data;
5904 
5905 	if (!lldp_status)
5906 		return ICE_ERR_BAD_PTR;
5907 
5908 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5909 	if (ret)
5910 		return ret;
5911 
5912 	/* Read the offset of EMP_SR_PTR */
5913 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5914 			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5915 			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5916 			      &loc_raw_data, false, true, NULL);
5917 	if (ret)
5918 		goto exit;
5919 
5920 	loc_data = LE16_TO_CPU(loc_raw_data);
5921 	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5922 		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5923 		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5924 	} else {
5925 		loc_data *= ICE_AQC_NVM_WORD_UNIT;
5926 	}
5927 
5928 	/* Read the offset of LLDP configuration pointer */
5929 	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5930 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5931 			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5932 			      false, true, NULL);
5933 	if (ret)
5934 		goto exit;
5935 
5936 	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5937 	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5938 	loc_data += loc_data_tmp;
5939 
5940 	/* We need to skip LLDP configuration section length (2 bytes) */
5941 	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5942 
5943 	/* Read the LLDP Default Configure */
5944 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5945 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5946 			      true, NULL);
5947 	if (!ret) {
5948 		data = LE32_TO_CPU(raw_data);
5949 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5950 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5951 		data = data & mask;
5952 		*lldp_status = data >>
5953 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5954 	}
5955 
5956 exit:
5957 	ice_release_nvm(hw);
5958 
5959 	return ret;
5960 }
5961 
5962 /**
5963  * ice_aq_read_i2c
5964  * @hw: pointer to the hw struct
5965  * @topo_addr: topology address for a device to communicate with
5966  * @bus_addr: 7-bit I2C bus address
5967  * @addr: I2C memory address (I2C offset) with up to 16 bits
5968  * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
5969  *			    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
5970  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5971  * @cd: pointer to command details structure or NULL
5972  *
5973  * Read I2C (0x06E2)
5974  */
5975 enum ice_status
5976 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5977 		u16 bus_addr, __le16 addr, u8 params, u8 *data,
5978 		struct ice_sq_cd *cd)
5979 {
5980 	struct ice_aq_desc desc = { 0 };
5981 	struct ice_aqc_i2c *cmd;
5982 	enum ice_status status;
5983 	u8 data_size;
5984 
5985 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5986 	cmd = &desc.params.read_write_i2c;
5987 
5988 	if (!data)
5989 		return ICE_ERR_PARAM;
5990 
5991 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5992 
5993 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5994 	cmd->topo_addr = topo_addr;
5995 	cmd->i2c_params = params;
5996 	cmd->i2c_addr = addr;
5997 
5998 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5999 	if (!status) {
6000 		struct ice_aqc_read_i2c_resp *resp;
6001 		u8 i;
6002 
6003 		resp = &desc.params.read_i2c_resp;
6004 		for (i = 0; i < data_size; i++) {
6005 			*data = resp->i2c_data[i];
6006 			data++;
6007 		}
6008 	}
6009 
6010 	return status;
6011 }
6012 
6013 /**
6014  * ice_aq_write_i2c
6015  * @hw: pointer to the hw struct
6016  * @topo_addr: topology address for a device to communicate with
6017  * @bus_addr: 7-bit I2C bus address
6018  * @addr: I2C memory address (I2C offset) with up to 16 bits
6019  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
6020  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
6021  * @cd: pointer to command details structure or NULL
6022  *
6023  * Write I2C (0x06E3)
6024  */
6025 enum ice_status
6026 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6027 		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
6028 		 struct ice_sq_cd *cd)
6029 {
6030 	struct ice_aq_desc desc = { 0 };
6031 	struct ice_aqc_i2c *cmd;
6032 	u8 i, data_size;
6033 
6034 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
6035 	cmd = &desc.params.read_write_i2c;
6036 
6037 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6038 
6039 	/* data_size limited to 4 */
6040 	if (data_size > 4)
6041 		return ICE_ERR_PARAM;
6042 
6043 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6044 	cmd->topo_addr = topo_addr;
6045 	cmd->i2c_params = params;
6046 	cmd->i2c_addr = addr;
6047 
6048 	for (i = 0; i < data_size; i++) {
6049 		cmd->i2c_data[i] = *data;
6050 		data++;
6051 	}
6052 
6053 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6054 }
6055 
6056 /**
6057  * ice_aq_set_gpio
6058  * @hw: pointer to the hw struct
6059  * @gpio_ctrl_handle: GPIO controller node handle
6060  * @pin_idx: IO Number of the GPIO that needs to be set
6061  * @value: SW provide IO value to set in the LSB
6062  * @cd: pointer to command details structure or NULL
6063  *
6064  * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
6065  */
6066 enum ice_status
6067 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6068 		struct ice_sq_cd *cd)
6069 {
6070 	struct ice_aqc_gpio *cmd;
6071 	struct ice_aq_desc desc;
6072 
6073 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
6074 	cmd = &desc.params.read_write_gpio;
6075 	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6076 	cmd->gpio_num = pin_idx;
6077 	cmd->gpio_val = value ? 1 : 0;
6078 
6079 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6080 }
6081 
6082 /**
6083  * ice_aq_get_gpio
6084  * @hw: pointer to the hw struct
6085  * @gpio_ctrl_handle: GPIO controller node handle
6086  * @pin_idx: IO Number of the GPIO that needs to be set
6087  * @value: IO value read
6088  * @cd: pointer to command details structure or NULL
6089  *
6090  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
6091  * the topology
6092  */
6093 enum ice_status
6094 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6095 		bool *value, struct ice_sq_cd *cd)
6096 {
6097 	struct ice_aqc_gpio *cmd;
6098 	struct ice_aq_desc desc;
6099 	enum ice_status status;
6100 
6101 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
6102 	cmd = &desc.params.read_write_gpio;
6103 	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6104 	cmd->gpio_num = pin_idx;
6105 
6106 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6107 	if (status)
6108 		return status;
6109 
6110 	*value = !!cmd->gpio_val;
6111 	return ICE_SUCCESS;
6112 }
6113 
6114 /**
6115  * ice_is_fw_api_min_ver
6116  * @hw: pointer to the hardware structure
6117  * @maj: major version
6118  * @min: minor version
6119  * @patch: patch version
6120  *
6121  * Checks if the firmware is minimum version
6122  */
6123 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6124 {
6125 	if (hw->api_maj_ver == maj) {
6126 		if (hw->api_min_ver > min)
6127 			return true;
6128 		if (hw->api_min_ver == min && hw->api_patch >= patch)
6129 			return true;
6130 	} else if (hw->api_maj_ver > maj) {
6131 		return true;
6132 	}
6133 
6134 	return false;
6135 }
6136 
6137 /**
6138  * ice_is_fw_min_ver
6139  * @hw: pointer to the hardware structure
6140  * @branch: branch version
6141  * @maj: major version
6142  * @min: minor version
6143  * @patch: patch version
6144  *
6145  * Checks if the firmware is minimum version
6146  */
6147 static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
6148 			      u8 patch)
6149 {
6150 	if (hw->fw_branch == branch) {
6151 		if (hw->fw_maj_ver > maj)
6152 			return true;
6153 		if (hw->fw_maj_ver == maj) {
6154 			if (hw->fw_min_ver > min)
6155 				return true;
6156 			if (hw->fw_min_ver == min && hw->fw_patch >= patch)
6157 				return true;
6158 		}
6159 	} else if (hw->fw_branch > branch) {
6160 		return true;
6161 	}
6162 
6163 	return false;
6164 }
6165 
6166 /**
6167  * ice_fw_supports_link_override
6168  * @hw: pointer to the hardware structure
6169  *
6170  * Checks if the firmware supports link override
6171  */
6172 bool ice_fw_supports_link_override(struct ice_hw *hw)
6173 {
6174 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6175 				     ICE_FW_API_LINK_OVERRIDE_MIN,
6176 				     ICE_FW_API_LINK_OVERRIDE_PATCH);
6177 }
6178 
6179 /**
6180  * ice_get_link_default_override
6181  * @ldo: pointer to the link default override struct
6182  * @pi: pointer to the port info struct
6183  *
6184  * Gets the link default override for a port
6185  */
6186 enum ice_status
6187 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6188 			      struct ice_port_info *pi)
6189 {
6190 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
6191 	struct ice_hw *hw = pi->hw;
6192 	enum ice_status status;
6193 
6194 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6195 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6196 	if (status) {
6197 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6198 		return status;
6199 	}
6200 
6201 	/* Each port has its own config; calculate for our port */
6202 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6203 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6204 
6205 	/* link options first */
6206 	status = ice_read_sr_word(hw, tlv_start, &buf);
6207 	if (status) {
6208 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6209 		return status;
6210 	}
6211 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6212 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6213 		ICE_LINK_OVERRIDE_PHY_CFG_S;
6214 
6215 	/* link PHY config */
6216 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6217 	status = ice_read_sr_word(hw, offset, &buf);
6218 	if (status) {
6219 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6220 		return status;
6221 	}
6222 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6223 
6224 	/* PHY types low */
6225 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6226 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6227 		status = ice_read_sr_word(hw, (offset + i), &buf);
6228 		if (status) {
6229 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6230 			return status;
6231 		}
6232 		/* shift 16 bits at a time to fill 64 bits */
6233 		ldo->phy_type_low |= ((u64)buf << (i * 16));
6234 	}
6235 
6236 	/* PHY types high */
6237 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6238 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6239 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6240 		status = ice_read_sr_word(hw, (offset + i), &buf);
6241 		if (status) {
6242 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6243 			return status;
6244 		}
6245 		/* shift 16 bits at a time to fill 64 bits */
6246 		ldo->phy_type_high |= ((u64)buf << (i * 16));
6247 	}
6248 
6249 	return status;
6250 }
6251 
6252 /**
6253  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6254  * @caps: get PHY capability data
6255  */
6256 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6257 {
6258 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6259 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6260 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
6261 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
6262 		return true;
6263 
6264 	return false;
6265 }
6266 
6267 /**
6268  * ice_is_fw_health_report_supported
6269  * @hw: pointer to the hardware structure
6270  *
6271  * Return true if firmware supports health status reports,
6272  * false otherwise
6273  */
6274 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6275 {
6276 	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6277 		return true;
6278 
6279 	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6280 		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6281 			return true;
6282 		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6283 		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6284 			return true;
6285 	}
6286 
6287 	return false;
6288 }
6289 
6290 /**
6291  * ice_aq_set_health_status_config - Configure FW health events
6292  * @hw: pointer to the HW struct
6293  * @event_source: type of diagnostic events to enable
6294  * @cd: pointer to command details structure or NULL
6295  *
6296  * Configure the health status event types that the firmware will send to this
6297  * PF. The supported event types are: PF-specific, all PFs, and global
6298  */
6299 enum ice_status
6300 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6301 				struct ice_sq_cd *cd)
6302 {
6303 	struct ice_aqc_set_health_status_config *cmd;
6304 	struct ice_aq_desc desc;
6305 
6306 	cmd = &desc.params.set_health_status_config;
6307 
6308 	ice_fill_dflt_direct_cmd_desc(&desc,
6309 				      ice_aqc_opc_set_health_status_config);
6310 
6311 	cmd->event_source = event_source;
6312 
6313 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6314 }
6315 
6316 /**
6317  * ice_aq_get_port_options
6318  * @hw: pointer to the hw struct
6319  * @options: buffer for the resultant port options
6320  * @option_count: input - size of the buffer in port options structures,
6321  *                output - number of returned port options
6322  * @lport: logical port to call the command with (optional)
6323  * @lport_valid: when false, FW uses port owned by the PF instead of lport,
6324  *               when PF owns more than 1 port it must be true
6325  * @active_option_idx: index of active port option in returned buffer
6326  * @active_option_valid: active option in returned buffer is valid
6327  * @pending_option_idx: index of pending port option in returned buffer
6328  * @pending_option_valid: pending option in returned buffer is valid
6329  *
6330  * Calls Get Port Options AQC (0x06ea) and verifies result.
6331  */
6332 enum ice_status
6333 ice_aq_get_port_options(struct ice_hw *hw,
6334 			struct ice_aqc_get_port_options_elem *options,
6335 			u8 *option_count, u8 lport, bool lport_valid,
6336 			u8 *active_option_idx, bool *active_option_valid,
6337 			u8 *pending_option_idx, bool *pending_option_valid)
6338 {
6339 	struct ice_aqc_get_port_options *cmd;
6340 	struct ice_aq_desc desc;
6341 	enum ice_status status;
6342 	u8 i;
6343 
6344 	/* options buffer shall be able to hold max returned options */
6345 	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
6346 		return ICE_ERR_PARAM;
6347 
6348 	cmd = &desc.params.get_port_options;
6349 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
6350 
6351 	cmd->lport_num = lport;
6352 	cmd->lport_num_valid = lport_valid;
6353 
6354 	status = ice_aq_send_cmd(hw, &desc, options,
6355 				 *option_count * sizeof(*options), NULL);
6356 	if (status != ICE_SUCCESS)
6357 		return status;
6358 
6359 	/* verify direct FW response & set output parameters */
6360 	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6361 	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6362 	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6363 	if (*active_option_valid) {
6364 		*active_option_idx = cmd->port_options &
6365 				     ICE_AQC_PORT_OPT_ACTIVE_M;
6366 		if (*active_option_idx > (*option_count - 1))
6367 			return ICE_ERR_OUT_OF_RANGE;
6368 		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6369 			  *active_option_idx);
6370 	}
6371 
6372 	*pending_option_valid = cmd->pending_port_option_status &
6373 				ICE_AQC_PENDING_PORT_OPT_VALID;
6374 	if (*pending_option_valid) {
6375 		*pending_option_idx = cmd->pending_port_option_status &
6376 				      ICE_AQC_PENDING_PORT_OPT_IDX_M;
6377 		if (*pending_option_idx > (*option_count - 1))
6378 			return ICE_ERR_OUT_OF_RANGE;
6379 		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
6380 			  *pending_option_idx);
6381 	}
6382 
6383 	/* mask output options fields */
6384 	for (i = 0; i < *option_count; i++) {
6385 		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
6386 		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
6387 		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6388 			  options[i].pmd, options[i].max_lane_speed);
6389 	}
6390 
6391 	return ICE_SUCCESS;
6392 }
6393 
6394 /**
6395  * ice_aq_set_port_option
6396  * @hw: pointer to the hw struct
6397  * @lport: logical port to call the command with
6398  * @lport_valid: when false, FW uses port owned by the PF instead of lport,
6399  *               when PF owns more than 1 port it must be true
6400  * @new_option: new port option to be written
6401  *
6402  * Calls Set Port Options AQC (0x06eb).
6403  */
6404 enum ice_status
6405 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
6406 		       u8 new_option)
6407 {
6408 	struct ice_aqc_set_port_option *cmd;
6409 	struct ice_aq_desc desc;
6410 
6411 	if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
6412 		return ICE_ERR_PARAM;
6413 
6414 	cmd = &desc.params.set_port_option;
6415 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
6416 
6417 	cmd->lport_num = lport;
6418 
6419 	cmd->lport_num_valid = lport_valid;
6420 	cmd->selected_port_option = new_option;
6421 
6422 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6423 }
6424 
6425 /**
6426  * ice_aq_set_lldp_mib - Set the LLDP MIB
6427  * @hw: pointer to the HW struct
6428  * @mib_type: Local, Remote or both Local and Remote MIBs
6429  * @buf: pointer to the caller-supplied buffer to store the MIB block
6430  * @buf_size: size of the buffer (in bytes)
6431  * @cd: pointer to command details structure or NULL
6432  *
6433  * Set the LLDP MIB. (0x0A08)
6434  */
6435 enum ice_status
6436 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6437 		    struct ice_sq_cd *cd)
6438 {
6439 	struct ice_aqc_lldp_set_local_mib *cmd;
6440 	struct ice_aq_desc desc;
6441 
6442 	cmd = &desc.params.lldp_set_mib;
6443 
6444 	if (buf_size == 0 || !buf)
6445 		return ICE_ERR_PARAM;
6446 
6447 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6448 
6449 	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
6450 	desc.datalen = CPU_TO_LE16(buf_size);
6451 
6452 	cmd->type = mib_type;
6453 	cmd->length = CPU_TO_LE16(buf_size);
6454 
6455 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6456 }
6457 
6458 /**
6459  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
6460  * @hw: pointer to HW struct
6461  */
6462 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6463 {
6464 	if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
6465 		return false;
6466 
6467 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6468 				     ICE_FW_API_LLDP_FLTR_MIN,
6469 				     ICE_FW_API_LLDP_FLTR_PATCH);
6470 }
6471 
6472 /**
6473  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6474  * @hw: pointer to HW struct
6475  * @vsi_num: absolute HW index for VSI
6476  * @add: boolean for if adding or removing a filter
6477  */
6478 enum ice_status
6479 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6480 {
6481 	struct ice_aqc_lldp_filter_ctrl *cmd;
6482 	struct ice_aq_desc desc;
6483 
6484 	cmd = &desc.params.lldp_filter_ctrl;
6485 
6486 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6487 
6488 	if (add)
6489 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6490 	else
6491 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6492 
6493 	cmd->vsi_num = CPU_TO_LE16(vsi_num);
6494 
6495 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6496 }
6497 
6498 /**
6499  * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6500  * @hw: pointer to HW struct
6501  */
6502 enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
6503 {
6504 	struct ice_aq_desc desc;
6505 
6506 	ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
6507 
6508 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6509 }
6510 
6511 /**
6512  * ice_fw_supports_report_dflt_cfg
6513  * @hw: pointer to the hardware structure
6514  *
6515  * Checks if the firmware supports report default configuration
6516  */
6517 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6518 {
6519 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6520 				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
6521 				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
6522 }
6523 
6524 /* each of the indexes into the following array match the speed of a return
6525  * value from the list of AQ returned speeds like the range:
6526  * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding
6527  * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) The array is defined as 15
6528  * elements long because the link_speed returned by the firmware is a 16 bit
6529  * value, but is indexed by [fls(speed) - 1]
6530  */
6531 static const u32 ice_aq_to_link_speed[15] = {
6532 	ICE_LINK_SPEED_10MBPS,	/* BIT(0) */
6533 	ICE_LINK_SPEED_100MBPS,
6534 	ICE_LINK_SPEED_1000MBPS,
6535 	ICE_LINK_SPEED_2500MBPS,
6536 	ICE_LINK_SPEED_5000MBPS,
6537 	ICE_LINK_SPEED_10000MBPS,
6538 	ICE_LINK_SPEED_20000MBPS,
6539 	ICE_LINK_SPEED_25000MBPS,
6540 	ICE_LINK_SPEED_40000MBPS,
6541 	ICE_LINK_SPEED_50000MBPS,
6542 	ICE_LINK_SPEED_100000MBPS,	/* BIT(10) */
6543 	ICE_LINK_SPEED_UNKNOWN,
6544 	ICE_LINK_SPEED_UNKNOWN,
6545 	ICE_LINK_SPEED_UNKNOWN,
6546 	ICE_LINK_SPEED_UNKNOWN		/* BIT(14) */
6547 };
6548 
6549 /**
6550  * ice_get_link_speed - get integer speed from table
6551  * @index: array index from fls(aq speed) - 1
6552  *
6553  * Returns: u32 value containing integer speed
6554  */
6555 u32 ice_get_link_speed(u16 index)
6556 {
6557 	return ice_aq_to_link_speed[index];
6558 }
6559 
6560 /**
6561  * ice_fw_supports_fec_dis_auto
6562  * @hw: pointer to the hardware structure
6563  *
6564  * Checks if the firmware supports FEC disable in Auto FEC mode
6565  */
6566 bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
6567 {
6568 	return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
6569 				 ICE_FW_FEC_DIS_AUTO_MAJ,
6570 				 ICE_FW_FEC_DIS_AUTO_MIN,
6571 				 ICE_FW_FEC_DIS_AUTO_PATCH);
6572 }
6573 /**
6574  * ice_is_fw_auto_drop_supported
6575  * @hw: pointer to the hardware structure
6576  *
6577  * Checks if the firmware supports auto drop feature
6578  */
6579 bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
6580 {
6581 	if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
6582 	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
6583 		return true;
6584 	return false;
6585 }
6586 
6587