xref: /freebsd/sys/dev/ice/ice_common.c (revision 963f5dc7a30624e95d72fb7f87b8892651164e46)
/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

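/* Number of polling iterations used while waiting for a reset to complete;
 * the per-iteration delay varies by caller (see ice_check_reset() and
 * ice_pf_reset()).
 */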
#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function returns the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, the MAC address
 * information is returned in the user-specified buffer, which should be
 * interpreted as a "manage_mac_read" response. The returned addresses are
 * also stored in the HW struct (port.mac). ice_discover_dev_caps is
 * expected to be called before this function.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
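	/* The firmware writes its response back into the descriptor, so
	 * cmd->flags now holds the response flags rather than the ones that
	 * were sent.
	 */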
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * The cage node type can be used to determine if a cage is present. If the
 * AQC returns an error (ENOENT), no cage is present; the connection type is
 * then backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

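	/* search for the node within the context of the current port */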
	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* The cage node type can be used to determine if a cage is present.
	 * If the AQC returns an error (ENOENT), no cage is present; the
	 * connection type is then backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

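	/* resp points into the descriptor, which the firmware has updated
	 * with its response, so this reflects whether LSE is now enabled.
	 */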
	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on the transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
			  ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;
	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

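	/* read-modify-write so that any reset request bits already pending
	 * in GLGEN_RTRIG are preserved
	 */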
	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

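/* Each ICE_CTX_STORE entry in the tables below describes how one field of a
 * sparse context structure is packed into the dense HW image: the field's
 * width in bits and the LSB bit position it occupies in the packed context.
 */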
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

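	/* Always enable descriptor prefetch rather than fetch-on-demand */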
	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
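
/* Usage sketch (illustrative only; the variable names and field values below
 * are examples, not taken from this file): a caller fills in a sparse
 * ice_rlan_ctx and hands it to ice_write_rxq_ctx, e.g.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = rxq_dma_addr >> 7;	// ring base, 128-byte units
 *	rlan_ctx.qlen = ring_len;		// ring length in descriptors
 *	rlan_ctx.dbuf = rx_buf_size >> 7;	// Rx buffer size, 128-byte units
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */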

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
				       /* Field			Width   LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
					/* Field		Width   LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_dnl_get_status:
	case ice_aqc_opc_dnl_run:
	case ice_aqc_opc_dnl_call:
	case ice_aqc_opc_dnl_read_sto:
	case ice_aqc_opc_dnl_write_sto:
	case ice_aqc_opc_dnl_set_breakpoints:
	case ice_aqc_opc_dnl_read_log:
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_done_alt_write:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command multiple times if the FW returns
 * the EBUSY AQ error.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = LE16_TO_CPU(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
			   ICE_NONDMA_TO_NONDMA);
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

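		/* restore the request buffer and descriptor before retrying;
		 * the previous attempt may have overwritten them with
		 * response data
		 */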
		if (buf_cpy)
			ice_memcpy(buf, buf_cpy, buf_size,
				   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
			   ICE_NONDMA_TO_NONDMA);

		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	if (buf_cpy)
		ice_free(hw, buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

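	/* send only the ASCII, NUL-terminated prefix of the driver string */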
	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) ICE_SUCCESS -        acquired lock, and can perform download package
 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                          successfully downloaded the package; the driver does
 *                          not have to download the package and can continue
 *                          loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
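	/* clear the caller's timeout now; it is only refreshed from the FW
	 * response on the paths below
	 */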
1617 	*timeout = 0;
1618 
1619 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1620 
1621 	/* The completion specifies the maximum time in ms that the driver
1622 	 * may hold the resource in the Timeout field.
1623 	 */
1624 
1625 	/* Global config lock response utilizes an additional status field.
1626 	 *
1627 	 * If the Global config lock resource is held by some other driver, the
1628 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1629 	 * and the timeout field indicates the maximum time the current owner
1630 	 * of the resource has to free it.
1631 	 */
1632 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1633 		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1634 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1635 			return ICE_SUCCESS;
1636 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1637 			   ICE_AQ_RES_GLBL_IN_PROG) {
1638 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1639 			return ICE_ERR_AQ_ERROR;
1640 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1641 			   ICE_AQ_RES_GLBL_DONE) {
1642 			return ICE_ERR_AQ_NO_WORK;
1643 		}
1644 
1645 		/* invalid FW response, force a timeout immediately */
1646 		*timeout = 0;
1647 		return ICE_ERR_AQ_ERROR;
1648 	}
1649 
1650 	/* If the resource is held by some other driver, the command completes
1651 	 * with a busy return value and the timeout field indicates the maximum
1652 	 * time the current owner of the resource has to free it.
1653 	 */
1654 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1655 		*timeout = LE32_TO_CPU(cmd_resp->timeout);
1656 
1657 	return status;
1658 }
1659 
1660 /**
1661  * ice_aq_release_res
1662  * @hw: pointer to the HW struct
1663  * @res: resource ID
1664  * @sdp_number: resource number
1665  * @cd: pointer to command details structure or NULL
1666  *
1667  * Release a common resource using the admin queue command (0x0009)
1668  */
1669 static enum ice_status
1670 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1671 		   struct ice_sq_cd *cd)
1672 {
1673 	struct ice_aqc_req_res *cmd;
1674 	struct ice_aq_desc desc;
1675 
1676 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1677 
1678 	cmd = &desc.params.res_owner;
1679 
1680 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1681 
1682 	cmd->res_id = CPU_TO_LE16(res);
1683 	cmd->res_number = CPU_TO_LE32(sdp_number);
1684 
1685 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1686 }
1687 
1688 /**
1689  * ice_acquire_res
1690  * @hw: pointer to the HW structure
1691  * @res: resource ID
1692  * @access: access type (read or write)
1693  * @timeout: timeout in milliseconds
1694  *
1695  * This function will attempt to acquire the ownership of a resource.
1696  */
1697 enum ice_status
1698 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1699 		enum ice_aq_res_access_type access, u32 timeout)
1700 {
1701 #define ICE_RES_POLLING_DELAY_MS	10
1702 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1703 	u32 time_left = timeout;
1704 	enum ice_status status;
1705 
1706 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1707 
1708 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1709 
1710 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1711 	 * previously acquired the resource and performed any necessary updates;
1712 	 * in this case the caller does not obtain the resource and has no
1713 	 * further work to do.
1714 	 */
1715 	if (status == ICE_ERR_AQ_NO_WORK)
1716 		goto ice_acquire_res_exit;
1717 
1718 	if (status)
1719 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1720 
1721 	/* If necessary, poll until the current lock owner times out */
1722 	timeout = time_left;
1723 	while (status && timeout && time_left) {
1724 		ice_msec_delay(delay, true);
1725 		timeout = (timeout > delay) ? timeout - delay : 0;
1726 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1727 
1728 		if (status == ICE_ERR_AQ_NO_WORK)
1729 			/* lock free, but no work to do */
1730 			break;
1731 
1732 		if (!status)
1733 			/* lock acquired */
1734 			break;
1735 	}
1736 	if (status && status != ICE_ERR_AQ_NO_WORK)
1737 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1738 
1739 ice_acquire_res_exit:
1740 	if (status == ICE_ERR_AQ_NO_WORK) {
1741 		if (access == ICE_RES_WRITE)
1742 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1743 		else
1744 			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1745 	}
1746 	return status;
1747 }
1748 
1749 /**
1750  * ice_release_res
1751  * @hw: pointer to the HW structure
1752  * @res: resource ID
1753  *
1754  * This function will release a resource using the proper Admin Command.
1755  */
1756 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1757 {
1758 	enum ice_status status;
1759 	u32 total_delay = 0;
1760 
1761 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1762 
1763 	status = ice_aq_release_res(hw, res, 0, NULL);
1764 
1765 	/* there are some rare cases when trying to release the resource
1766 	 * results in an admin queue timeout, so handle them correctly
1767 	 */
1768 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1769 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1770 		ice_msec_delay(1, true);
1771 		status = ice_aq_release_res(hw, res, 0, NULL);
1772 		total_delay++;
1773 	}
1774 }
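
/*
 * Usage sketch (illustrative, not part of the driver): callers typically
 * bracket their work between ice_acquire_res() and ice_release_res(),
 * treating ICE_ERR_AQ_NO_WORK as "another driver already did the work".
 * The function name and the 3000 ms timeout below are hypothetical.
 */
static enum ice_status ice_example_locked_update(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 3000);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* another PF finished the job */
	if (status)
		return status;		/* could not acquire the lock */

	/* ... perform the update that requires the lock ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return ICE_SUCCESS;
}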
1775 
1776 /**
1777  * ice_aq_alloc_free_res - command to allocate/free resources
1778  * @hw: pointer to the HW struct
1779  * @num_entries: number of resource entries in buffer
1780  * @buf: Indirect buffer to hold data parameters and response
1781  * @buf_size: size of buffer for indirect commands
1782  * @opc: pass in the command opcode
1783  * @cd: pointer to command details structure or NULL
1784  *
1785  * Helper function to allocate/free resources using the admin queue commands
1786  */
1787 enum ice_status
1788 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1789 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1790 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1791 {
1792 	struct ice_aqc_alloc_free_res_cmd *cmd;
1793 	struct ice_aq_desc desc;
1794 
1795 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1796 
1797 	cmd = &desc.params.sw_res_ctrl;
1798 
1799 	if (!buf)
1800 		return ICE_ERR_PARAM;
1801 
1802 	if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
1803 		return ICE_ERR_PARAM;
1804 
1805 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1806 
1807 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1808 
1809 	cmd->num_entries = CPU_TO_LE16(num_entries);
1810 
1811 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1812 }
1813 
1814 /**
1815  * ice_alloc_hw_res - allocate resource
1816  * @hw: pointer to the HW struct
1817  * @type: type of resource
1818  * @num: number of resources to allocate
1819  * @btm: allocate from bottom
1820  * @res: pointer to array that will receive the resources
1821  */
1822 enum ice_status
1823 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1824 {
1825 	struct ice_aqc_alloc_free_res_elem *buf;
1826 	enum ice_status status;
1827 	u16 buf_len;
1828 
1829 	buf_len = ice_struct_size(buf, elem, num);
1830 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1831 	if (!buf)
1832 		return ICE_ERR_NO_MEMORY;
1833 
1834 	/* Prepare buffer to allocate resource. */
1835 	buf->num_elems = CPU_TO_LE16(num);
1836 	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1837 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1838 	if (btm)
1839 		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1840 
1841 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1842 				       ice_aqc_opc_alloc_res, NULL);
1843 	if (status)
1844 		goto ice_alloc_res_exit;
1845 
1846 	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1847 		   ICE_NONDMA_TO_NONDMA);
1848 
1849 ice_alloc_res_exit:
1850 	ice_free(hw, buf);
1851 	return status;
1852 }
1853 
1854 /**
1855  * ice_free_hw_res - free allocated HW resource
1856  * @hw: pointer to the HW struct
1857  * @type: type of resource to free
1858  * @num: number of resources
1859  * @res: pointer to array that contains the resources to free
1860  */
1861 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1862 {
1863 	struct ice_aqc_alloc_free_res_elem *buf;
1864 	enum ice_status status;
1865 	u16 buf_len;
1866 
1867 	buf_len = ice_struct_size(buf, elem, num);
1868 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1869 	if (!buf)
1870 		return ICE_ERR_NO_MEMORY;
1871 
1872 	/* Prepare buffer to free resource. */
1873 	buf->num_elems = CPU_TO_LE16(num);
1874 	buf->res_type = CPU_TO_LE16(type);
1875 	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1876 		   ICE_NONDMA_TO_NONDMA);
1877 
1878 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1879 				       ice_aqc_opc_free_res, NULL);
1880 	if (status)
1881 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1882 
1883 	ice_free(hw, buf);
1884 	return status;
1885 }
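
/*
 * Usage sketch (illustrative, not part of the driver): allocating a block
 * of resources and later freeing it with the same type and IDs. The
 * function name is hypothetical, and "res_type" stands in for one of the
 * ICE_AQC_RES_TYPE_* values defined elsewhere in the driver.
 */
static enum ice_status
ice_example_res_cycle(struct ice_hw *hw, u16 res_type)
{
	enum ice_status status;
	u16 res_ids[4];

	/* allocate four resources, scanning from the top of the space */
	status = ice_alloc_hw_res(hw, res_type, 4, false, res_ids);
	if (status)
		return status;

	/* ... use the four resource IDs returned in res_ids ... */

	return ice_free_hw_res(hw, res_type, 4, res_ids);
}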
1886 
1887 /**
1888  * ice_get_num_per_func - determine number of resources per PF
1889  * @hw: pointer to the HW structure
1890  * @max: value to be evenly split between each PF
1891  *
1892  * Determine the number of valid functions by going through the bitmap returned
1893  * from parsing capabilities and use this to calculate the number of resources
1894  * per PF based on the max value passed in.
1895  */
1896 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1897 {
1898 	u8 funcs;
1899 
1900 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1901 	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1902 			     ICE_CAPS_VALID_FUNCS_M);
1903 
1904 	if (!funcs)
1905 		return 0;
1906 
1907 	return max / funcs;
1908 }
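
/*
 * Worked example (illustrative): if valid_functions = 0x0F, four bits are
 * set, so ice_hweight8() returns 4; splitting a device-wide maximum of,
 * say, 768 VSIs (the ICE_MAX_VSI total used by ice_parse_vsi_func_caps()
 * below) would then guarantee 768 / 4 = 192 VSIs per PF.
 */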
1909 
1910 /**
1911  * ice_print_led_caps - print LED capabilities
1912  * @hw: pointer to the ice_hw instance
1913  * @caps: pointer to common caps instance
1914  * @prefix: string to prefix when printing
1915  * @dbg: set to indicate debug print
1916  */
1917 static void
1918 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1919 		   char const *prefix, bool dbg)
1920 {
1921 	u8 i;
1922 
1923 	if (dbg)
1924 		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
1925 			  caps->led_pin_num);
1926 	else
1927 		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
1928 			 caps->led_pin_num);
1929 
1930 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
1931 		if (!caps->led[i])
1932 			continue;
1933 
1934 		if (dbg)
1935 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
1936 				  prefix, i, caps->led[i]);
1937 		else
1938 			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
1939 				 caps->led[i]);
1940 	}
1941 }
1942 
1943 /**
1944  * ice_print_sdp_caps - print SDP capabilities
1945  * @hw: pointer to the ice_hw instance
1946  * @caps: pointer to common caps instance
1947  * @prefix: string to prefix when printing
1948  * @dbg: set to indicate debug print
1949  */
1950 static void
1951 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1952 		   char const *prefix, bool dbg)
1953 {
1954 	u8 i;
1955 
1956 	if (dbg)
1957 		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
1958 			  caps->sdp_pin_num);
1959 	else
1960 		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
1961 			 caps->sdp_pin_num);
1962 
1963 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
1964 		if (!caps->sdp[i])
1965 			continue;
1966 
1967 		if (dbg)
1968 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
1969 				  prefix, i, caps->sdp[i]);
1970 		else
1971 			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
1972 				 i, caps->sdp[i]);
1973 	}
1974 }
1975 
1976 /**
1977  * ice_parse_common_caps - parse common device/function capabilities
1978  * @hw: pointer to the HW struct
1979  * @caps: pointer to common capabilities structure
1980  * @elem: the capability element to parse
1981  * @prefix: message prefix for tracing capabilities
1982  *
1983  * Given a capability element, extract relevant details into the common
1984  * capability structure.
1985  *
1986  * Returns: true if the capability matches one of the common capability ids,
1987  * false otherwise.
1988  */
1989 static bool
1990 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1991 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
1992 {
1993 	u32 logical_id = LE32_TO_CPU(elem->logical_id);
1994 	u32 phys_id = LE32_TO_CPU(elem->phys_id);
1995 	u32 number = LE32_TO_CPU(elem->number);
1996 	u16 cap = LE16_TO_CPU(elem->cap);
1997 	bool found = true;
1998 
1999 	switch (cap) {
2000 	case ICE_AQC_CAPS_SWITCHING_MODE:
2001 		caps->switching_mode = number;
2002 		ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
2003 			  caps->switching_mode);
2004 		break;
2005 	case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2006 		caps->mgmt_mode = number;
2007 		caps->mgmt_protocols_mctp = logical_id;
2008 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
2009 			  caps->mgmt_mode);
2010 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
2011 			  caps->mgmt_protocols_mctp);
2012 		break;
2013 	case ICE_AQC_CAPS_OS2BMC:
2014 		caps->os2bmc = number;
2015 		ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
2016 		break;
2017 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
2018 		caps->valid_functions = number;
2019 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2020 			  caps->valid_functions);
2021 		break;
2022 	case ICE_AQC_CAPS_SRIOV:
2023 		caps->sr_iov_1_1 = (number == 1);
2024 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2025 			  caps->sr_iov_1_1);
2026 		break;
2027 	case ICE_AQC_CAPS_802_1QBG:
2028 		caps->evb_802_1_qbg = (number == 1);
2029 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
2030 		break;
2031 	case ICE_AQC_CAPS_802_1BR:
2032 		caps->evb_802_1_qbh = (number == 1);
2033 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
2034 		break;
2035 	case ICE_AQC_CAPS_DCB:
2036 		caps->dcb = (number == 1);
2037 		caps->active_tc_bitmap = logical_id;
2038 		caps->maxtc = phys_id;
2039 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2040 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2041 			  caps->active_tc_bitmap);
2042 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2043 		break;
2044 	case ICE_AQC_CAPS_ISCSI:
2045 		caps->iscsi = (number == 1);
2046 		ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
2047 		break;
2048 	case ICE_AQC_CAPS_RSS:
2049 		caps->rss_table_size = number;
2050 		caps->rss_table_entry_width = logical_id;
2051 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2052 			  caps->rss_table_size);
2053 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2054 			  caps->rss_table_entry_width);
2055 		break;
2056 	case ICE_AQC_CAPS_RXQS:
2057 		caps->num_rxq = number;
2058 		caps->rxq_first_id = phys_id;
2059 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2060 			  caps->num_rxq);
2061 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2062 			  caps->rxq_first_id);
2063 		break;
2064 	case ICE_AQC_CAPS_TXQS:
2065 		caps->num_txq = number;
2066 		caps->txq_first_id = phys_id;
2067 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2068 			  caps->num_txq);
2069 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2070 			  caps->txq_first_id);
2071 		break;
2072 	case ICE_AQC_CAPS_MSIX:
2073 		caps->num_msix_vectors = number;
2074 		caps->msix_vector_first_id = phys_id;
2075 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2076 			  caps->num_msix_vectors);
2077 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2078 			  caps->msix_vector_first_id);
2079 		break;
2080 	case ICE_AQC_CAPS_NVM_VER:
2081 		break;
2082 	case ICE_AQC_CAPS_NVM_MGMT:
2083 		caps->sec_rev_disabled =
2084 			(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2085 			true : false;
2086 		ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2087 			  caps->sec_rev_disabled);
2088 		caps->update_disabled =
2089 			(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2090 			true : false;
2091 		ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2092 			  caps->update_disabled);
2093 		caps->nvm_unified_update =
2094 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2095 			true : false;
2096 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2097 			  caps->nvm_unified_update);
2098 		break;
2099 	case ICE_AQC_CAPS_CEM:
2100 		caps->mgmt_cem = (number == 1);
2101 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2102 			  caps->mgmt_cem);
2103 		break;
2104 	case ICE_AQC_CAPS_LED:
2105 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2106 			caps->led[phys_id] = true;
2107 			caps->led_pin_num++;
2108 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2109 		}
2110 		break;
2111 	case ICE_AQC_CAPS_SDP:
2112 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2113 			caps->sdp[phys_id] = true;
2114 			caps->sdp_pin_num++;
2115 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2116 		}
2117 		break;
2118 	case ICE_AQC_CAPS_WR_CSR_PROT:
2119 		caps->wr_csr_prot = number;
2120 		caps->wr_csr_prot |= (u64)logical_id << 32;
2121 		ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2122 			  (unsigned long long)caps->wr_csr_prot);
2123 		break;
2124 	case ICE_AQC_CAPS_WOL_PROXY:
2125 		caps->num_wol_proxy_fltr = number;
2126 		caps->wol_proxy_vsi_seid = logical_id;
2127 		caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2128 		caps->acpi_prog_mthd = !!(phys_id &
2129 					  ICE_ACPI_PROG_MTHD_M);
2130 		caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2131 		ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2132 			  caps->num_wol_proxy_fltr);
2133 		ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2134 			  caps->wol_proxy_vsi_seid);
2135 		break;
2136 	case ICE_AQC_CAPS_MAX_MTU:
2137 		caps->max_mtu = number;
2138 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2139 			  prefix, caps->max_mtu);
2140 		break;
2141 	default:
2142 		/* Not one of the recognized common capabilities */
2143 		found = false;
2144 	}
2145 
2146 	return found;
2147 }
2148 
2149 /**
2150  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2151  * @hw: pointer to the HW structure
2152  * @caps: pointer to capabilities structure to fix
2153  *
2154  * Re-calculate the capabilities that are dependent on the number of physical
2155  * ports; i.e. some features are not supported or function differently on
2156  * devices with more than 4 ports.
2157  */
2158 static void
2159 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2160 {
2161 	/* This assumes device capabilities are always scanned before function
2162 	 * capabilities during the initialization flow.
2163 	 */
2164 	if (hw->dev_caps.num_funcs > 4) {
2165 		/* Max 4 TCs per port */
2166 		caps->maxtc = 4;
2167 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2168 			  caps->maxtc);
2169 	}
2170 }
2171 
2172 /**
2173  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2174  * @hw: pointer to the HW struct
2175  * @func_p: pointer to function capabilities structure
2176  * @cap: pointer to the capability element to parse
2177  *
2178  * Extract function capabilities for ICE_AQC_CAPS_VF.
2179  */
2180 static void
2181 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2182 		       struct ice_aqc_list_caps_elem *cap)
2183 {
2184 	u32 number = LE32_TO_CPU(cap->number);
2185 	u32 logical_id = LE32_TO_CPU(cap->logical_id);
2186 
2187 	func_p->num_allocd_vfs = number;
2188 	func_p->vf_base_id = logical_id;
2189 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2190 		  func_p->num_allocd_vfs);
2191 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2192 		  func_p->vf_base_id);
2193 }
2194 
2195 /**
2196  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2197  * @hw: pointer to the HW struct
2198  * @func_p: pointer to function capabilities structure
2199  * @cap: pointer to the capability element to parse
2200  *
2201  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2202  */
2203 static void
2204 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2205 			struct ice_aqc_list_caps_elem *cap)
2206 {
2207 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2208 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2209 		  LE32_TO_CPU(cap->number));
2210 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2211 		  func_p->guar_num_vsi);
2212 }
2213 
2214 /**
2215  * ice_parse_func_caps - Parse function capabilities
2216  * @hw: pointer to the HW struct
2217  * @func_p: pointer to function capabilities structure
2218  * @buf: buffer containing the function capability records
2219  * @cap_count: the number of capabilities
2220  *
2221  * Helper function to parse the function (0x000A) capabilities list. For
2222  * capabilities shared between device and function, this relies on
2223  * ice_parse_common_caps.
2224  *
2225  * Loop through the list of provided capabilities and extract the relevant
2226  * data into the function capabilities structure.
2227  */
2228 static void
2229 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2230 		    void *buf, u32 cap_count)
2231 {
2232 	struct ice_aqc_list_caps_elem *cap_resp;
2233 	u32 i;
2234 
2235 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2236 
2237 	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2238 
2239 	for (i = 0; i < cap_count; i++) {
2240 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2241 		bool found;
2242 
2243 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2244 					      &cap_resp[i], "func caps");
2245 
2246 		switch (cap) {
2247 		case ICE_AQC_CAPS_VF:
2248 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2249 			break;
2250 		case ICE_AQC_CAPS_VSI:
2251 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2252 			break;
2253 		default:
2254 			/* Don't list common capabilities as unknown */
2255 			if (!found)
2256 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2257 					  i, cap);
2258 			break;
2259 		}
2260 	}
2261 
2262 	ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2263 	ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2264 
2265 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2266 }
2267 
2268 /**
2269  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2270  * @hw: pointer to the HW struct
2271  * @dev_p: pointer to device capabilities structure
2272  * @cap: capability element to parse
2273  *
2274  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2275  */
2276 static void
2277 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2278 			      struct ice_aqc_list_caps_elem *cap)
2279 {
2280 	u32 number = LE32_TO_CPU(cap->number);
2281 
2282 	dev_p->num_funcs = ice_hweight32(number);
2283 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2284 		  dev_p->num_funcs);
2285 }
2286 
2287 /**
2288  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2289  * @hw: pointer to the HW struct
2290  * @dev_p: pointer to device capabilities structure
2291  * @cap: capability element to parse
2292  *
2293  * Parse ICE_AQC_CAPS_VF for device capabilities.
2294  */
2295 static void
2296 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2297 		      struct ice_aqc_list_caps_elem *cap)
2298 {
2299 	u32 number = LE32_TO_CPU(cap->number);
2300 
2301 	dev_p->num_vfs_exposed = number;
2302 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2303 		  dev_p->num_vfs_exposed);
2304 }
2305 
2306 /**
2307  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2308  * @hw: pointer to the HW struct
2309  * @dev_p: pointer to device capabilities structure
2310  * @cap: capability element to parse
2311  *
2312  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2313  */
2314 static void
2315 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2316 		       struct ice_aqc_list_caps_elem *cap)
2317 {
2318 	u32 number = LE32_TO_CPU(cap->number);
2319 
2320 	dev_p->num_vsi_allocd_to_host = number;
2321 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2322 		  dev_p->num_vsi_allocd_to_host);
2323 }
2324 
2325 /**
2326  * ice_parse_dev_caps - Parse device capabilities
2327  * @hw: pointer to the HW struct
2328  * @dev_p: pointer to device capabilities structure
2329  * @buf: buffer containing the device capability records
2330  * @cap_count: the number of capabilities
2331  *
2332  * Helper function to parse the device (0x000B) capabilities list. For
2333  * capabilities shared between device and function, this relies on
2334  * ice_parse_common_caps.
2335  *
2336  * Loop through the list of provided capabilities and extract the relevant
2337  * data into the device capabilities structure.
2338  */
2339 static void
2340 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2341 		   void *buf, u32 cap_count)
2342 {
2343 	struct ice_aqc_list_caps_elem *cap_resp;
2344 	u32 i;
2345 
2346 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2347 
2348 	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2349 
2350 	for (i = 0; i < cap_count; i++) {
2351 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2352 		bool found;
2353 
2354 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2355 					      &cap_resp[i], "dev caps");
2356 
2357 		switch (cap) {
2358 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2359 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2360 			break;
2361 		case ICE_AQC_CAPS_VF:
2362 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2363 			break;
2364 		case ICE_AQC_CAPS_VSI:
2365 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2366 			break;
2367 		default:
2368 			/* Don't list common capabilities as unknown */
2369 			if (!found)
2370 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2371 					  i, cap);
2372 			break;
2373 		}
2374 	}
2375 
2376 	ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2377 	ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2378 
2379 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2380 }
2381 
2382 /**
2383  * ice_aq_list_caps - query function/device capabilities
2384  * @hw: pointer to the HW struct
2385  * @buf: a buffer to hold the capabilities
2386  * @buf_size: size of the buffer
2387  * @cap_count: if not NULL, set to the number of capabilities reported
2388  * @opc: capabilities type to discover, device or function
2389  * @cd: pointer to command details structure or NULL
2390  *
2391  * Get the function (0x000A) or device (0x000B) capabilities description from
2392  * firmware and store it in the buffer.
2393  *
2394  * If the cap_count pointer is not NULL, then it is set to the number of
2395  * capabilities firmware will report. Note that if the buffer size is too
2396  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2397  * cap_count will still be updated in this case. It is recommended that the
2398  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2399  * firmware could return) to avoid this.
2400  */
2401 static enum ice_status
2402 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2403 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2404 {
2405 	struct ice_aqc_list_caps *cmd;
2406 	struct ice_aq_desc desc;
2407 	enum ice_status status;
2408 
2409 	cmd = &desc.params.get_cap;
2410 
2411 	if (opc != ice_aqc_opc_list_func_caps &&
2412 	    opc != ice_aqc_opc_list_dev_caps)
2413 		return ICE_ERR_PARAM;
2414 
2415 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2416 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2417 
2418 	if (cap_count)
2419 		*cap_count = LE32_TO_CPU(cmd->count);
2420 
2421 	return status;
2422 }
2423 
2424 /**
2425  * ice_discover_dev_caps - Read and extract device capabilities
2426  * @hw: pointer to the hardware structure
2427  * @dev_caps: pointer to device capabilities structure
2428  *
2429  * Read the device capabilities and extract them into the dev_caps structure
2430  * for later use.
2431  */
2432 static enum ice_status
2433 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2434 {
2435 	enum ice_status status;
2436 	u32 cap_count = 0;
2437 	void *cbuf;
2438 
2439 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2440 	if (!cbuf)
2441 		return ICE_ERR_NO_MEMORY;
2442 
2443 	/* Although the driver doesn't know the number of capabilities the
2444 	 * device will return, we can simply send a 4KB buffer, the maximum
2445 	 * possible size that firmware can return.
2446 	 */
2447 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2448 
2449 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2450 				  ice_aqc_opc_list_dev_caps, NULL);
2451 	if (!status)
2452 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2453 	ice_free(hw, cbuf);
2454 
2455 	return status;
2456 }
2457 
2458 /**
2459  * ice_discover_func_caps - Read and extract function capabilities
2460  * @hw: pointer to the hardware structure
2461  * @func_caps: pointer to function capabilities structure
2462  *
2463  * Read the function capabilities and extract them into the func_caps structure
2464  * for later use.
2465  */
2466 static enum ice_status
2467 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2468 {
2469 	enum ice_status status;
2470 	u32 cap_count = 0;
2471 	void *cbuf;
2472 
2473 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2474 	if (!cbuf)
2475 		return ICE_ERR_NO_MEMORY;
2476 
2477 	/* Although the driver doesn't know the number of capabilities the
2478 	 * device will return, we can simply send a 4KB buffer, the maximum
2479 	 * possible size that firmware can return.
2480 	 */
2481 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2482 
2483 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2484 				  ice_aqc_opc_list_func_caps, NULL);
2485 	if (!status)
2486 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2487 	ice_free(hw, cbuf);
2488 
2489 	return status;
2490 }
2491 
2492 /**
2493  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2494  * @hw: pointer to the hardware structure
2495  */
2496 void ice_set_safe_mode_caps(struct ice_hw *hw)
2497 {
2498 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2499 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2500 	struct ice_hw_common_caps cached_caps;
2501 	u32 num_funcs;
2502 
2503 	/* cache some func_caps values that should be restored after memset */
2504 	cached_caps = func_caps->common_cap;
2505 
2506 	/* unset func capabilities */
2507 	memset(func_caps, 0, sizeof(*func_caps));
2508 
2509 #define ICE_RESTORE_FUNC_CAP(name) \
2510 	func_caps->common_cap.name = cached_caps.name
2511 
2512 	/* restore cached values */
2513 	ICE_RESTORE_FUNC_CAP(valid_functions);
2514 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2515 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2516 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2517 	ICE_RESTORE_FUNC_CAP(max_mtu);
2518 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2519 
2520 	/* one Tx and one Rx queue in safe mode */
2521 	func_caps->common_cap.num_rxq = 1;
2522 	func_caps->common_cap.num_txq = 1;
2523 
2524 	/* two MSIX vectors, one for traffic and one for misc causes */
2525 	func_caps->common_cap.num_msix_vectors = 2;
2526 	func_caps->guar_num_vsi = 1;
2527 
2528 	/* cache some dev_caps values that should be restored after memset */
2529 	cached_caps = dev_caps->common_cap;
2530 	num_funcs = dev_caps->num_funcs;
2531 
2532 	/* unset dev capabilities */
2533 	memset(dev_caps, 0, sizeof(*dev_caps));
2534 
2535 #define ICE_RESTORE_DEV_CAP(name) \
2536 	dev_caps->common_cap.name = cached_caps.name
2537 
2538 	/* restore cached values */
2539 	ICE_RESTORE_DEV_CAP(valid_functions);
2540 	ICE_RESTORE_DEV_CAP(txq_first_id);
2541 	ICE_RESTORE_DEV_CAP(rxq_first_id);
2542 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2543 	ICE_RESTORE_DEV_CAP(max_mtu);
2544 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2545 	dev_caps->num_funcs = num_funcs;
2546 
2547 	/* one Tx and one Rx queue per function in safe mode */
2548 	dev_caps->common_cap.num_rxq = num_funcs;
2549 	dev_caps->common_cap.num_txq = num_funcs;
2550 
2551 	/* two MSIX vectors per function */
2552 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2553 }
2554 
2555 /**
2556  * ice_get_caps - get info about the HW
2557  * @hw: pointer to the hardware structure
2558  */
2559 enum ice_status ice_get_caps(struct ice_hw *hw)
2560 {
2561 	enum ice_status status;
2562 
2563 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2564 	if (status)
2565 		return status;
2566 
2567 	return ice_discover_func_caps(hw, &hw->func_caps);
2568 }
2569 
2570 /**
2571  * ice_aq_manage_mac_write - manage MAC address write command
2572  * @hw: pointer to the HW struct
2573  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2574  * @flags: flags to control write behavior
2575  * @cd: pointer to command details structure or NULL
2576  *
2577  * This function is used to write MAC address to the NVM (0x0108).
2578  */
2579 enum ice_status
2580 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2581 			struct ice_sq_cd *cd)
2582 {
2583 	struct ice_aqc_manage_mac_write *cmd;
2584 	struct ice_aq_desc desc;
2585 
2586 	cmd = &desc.params.mac_write;
2587 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2588 
2589 	cmd->flags = flags;
2590 	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2591 
2592 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2593 }
2594 
2595 /**
2596  * ice_aq_clear_pxe_mode
2597  * @hw: pointer to the HW struct
2598  *
2599  * Tell the firmware that the driver is taking over from PXE (0x0110).
2600  */
2601 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2602 {
2603 	struct ice_aq_desc desc;
2604 
2605 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2606 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2607 
2608 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2609 }
2610 
2611 /**
2612  * ice_clear_pxe_mode - clear pxe operations mode
2613  * @hw: pointer to the HW struct
2614  *
2615  * Make sure all PXE mode settings are cleared, including things
2616  * like descriptor fetch/write-back mode.
2617  */
2618 void ice_clear_pxe_mode(struct ice_hw *hw)
2619 {
2620 	if (ice_check_sq_alive(hw, &hw->adminq))
2621 		ice_aq_clear_pxe_mode(hw);
2622 }
2623 
2624 /**
2625  * ice_aq_set_port_params - set physical port parameters.
2626  * @pi: pointer to the port info struct
2627  * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2628  * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2629  * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2630  * @double_vlan: if set, double VLAN is enabled
2631  * @cd: pointer to command details structure or NULL
2632  *
2633  * Set Physical port parameters (0x0203)
2634  */
2635 enum ice_status
2636 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2637 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2638 		       struct ice_sq_cd *cd)
2639 
2640 {
2641 	struct ice_aqc_set_port_params *cmd;
2642 	struct ice_hw *hw = pi->hw;
2643 	struct ice_aq_desc desc;
2644 	u16 cmd_flags = 0;
2645 
2646 	cmd = &desc.params.set_port_params;
2647 
2648 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2649 	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2650 	if (save_bad_pac)
2651 		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2652 	if (pad_short_pac)
2653 		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2654 	if (double_vlan)
2655 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2656 	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2657 
2658 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2659 }
2660 
2661 /**
2662  * ice_get_link_speed_based_on_phy_type - returns link speed
2663  * @phy_type_low: lower part of phy_type
2664  * @phy_type_high: higher part of phy_type
2665  *
2666  * This helper function will convert an entry in PHY type structure
2667  * [phy_type_low, phy_type_high] to its corresponding link speed.
2668  * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
2669  * should be set, as this function converts a single PHY type to its
2670  * speed.
2671  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2672  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2673  */
2674 static u16
2675 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2676 {
2677 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2678 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2679 
2680 	switch (phy_type_low) {
2681 	case ICE_PHY_TYPE_LOW_100BASE_TX:
2682 	case ICE_PHY_TYPE_LOW_100M_SGMII:
2683 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2684 		break;
2685 	case ICE_PHY_TYPE_LOW_1000BASE_T:
2686 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2687 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2688 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2689 	case ICE_PHY_TYPE_LOW_1G_SGMII:
2690 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2691 		break;
2692 	case ICE_PHY_TYPE_LOW_2500BASE_T:
2693 	case ICE_PHY_TYPE_LOW_2500BASE_X:
2694 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2695 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2696 		break;
2697 	case ICE_PHY_TYPE_LOW_5GBASE_T:
2698 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2699 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2700 		break;
2701 	case ICE_PHY_TYPE_LOW_10GBASE_T:
2702 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2703 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2704 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2705 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2706 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2707 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2708 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2709 		break;
2710 	case ICE_PHY_TYPE_LOW_25GBASE_T:
2711 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2712 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2713 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2714 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2715 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2716 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2717 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2718 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2719 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2720 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2721 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2722 		break;
2723 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2724 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2725 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2726 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2727 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2728 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2729 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2730 		break;
2731 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2732 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2733 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2734 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2735 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2736 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2737 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2738 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2739 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2740 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2741 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2742 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2743 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2744 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2745 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2746 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2747 		break;
2748 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2749 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2750 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2751 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2752 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2753 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2754 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2755 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2756 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2757 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2758 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2759 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2760 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2761 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2762 		break;
2763 	default:
2764 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2765 		break;
2766 	}
2767 
2768 	switch (phy_type_high) {
2769 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2770 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2771 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2772 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2773 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2774 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2775 		break;
2776 	default:
2777 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2778 		break;
2779 	}
2780 
2781 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2782 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2783 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2784 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2785 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2786 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2787 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2788 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2789 		return speed_phy_type_low;
2790 	else
2791 		return speed_phy_type_high;
2792 }
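
/*
 * Illustrative call (not in the driver): each ICE_PHY_TYPE_LOW_* /
 * ICE_PHY_TYPE_HIGH_* value is a single-bit mask, so passing exactly one
 * of them yields the matching ICE_AQ_LINK_SPEED_* value:
 *
 *	speed = ice_get_link_speed_based_on_phy_type(
 *			ICE_PHY_TYPE_LOW_25GBASE_SR, 0);
 *	-> speed == ICE_AQ_LINK_SPEED_25GB
 *
 * Passing zero, or a value with several bits set, yields
 * ICE_AQ_LINK_SPEED_UNKNOWN instead.
 */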
2793 
2794 /**
2795  * ice_update_phy_type
2796  * @phy_type_low: pointer to the lower part of phy_type
2797  * @phy_type_high: pointer to the higher part of phy_type
2798  * @link_speeds_bitmap: targeted link speeds bitmap
2799  *
2800  * Note: For the link_speeds_bitmap layout, see
2801  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2802  * link_speeds_bitmap that includes multiple speeds.
2803  *
2804  * Each entry in the [phy_type_low, phy_type_high] structure represents
2805  * a certain link speed. This helper function turns on the bits in
2806  * [phy_type_low, phy_type_high] that correspond to the speeds set in
2807  * the link_speeds_bitmap input parameter.
2808  */
2809 void
2810 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2811 		    u16 link_speeds_bitmap)
2812 {
2813 	u64 pt_high;
2814 	u64 pt_low;
2815 	int index;
2816 	u16 speed;
2817 
2818 	/* We first check with low part of phy_type */
2819 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2820 		pt_low = BIT_ULL(index);
2821 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2822 
2823 		if (link_speeds_bitmap & speed)
2824 			*phy_type_low |= BIT_ULL(index);
2825 	}
2826 
2827 	/* We then check with high part of phy_type */
2828 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2829 		pt_high = BIT_ULL(index);
2830 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2831 
2832 		if (link_speeds_bitmap & speed)
2833 			*phy_type_high |= BIT_ULL(index);
2834 	}
2835 }
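
/*
 * Usage sketch (illustrative, not part of the driver): translating a set
 * of requested link speeds into PHY type masks before building a PHY
 * configuration. The function name is hypothetical.
 */
static void ice_example_speeds_to_phy_types(u64 *low, u64 *high)
{
	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;

	*low = 0;
	*high = 0;
	/* sets every bit in [low, high] whose PHY type runs at 10G or 25G */
	ice_update_phy_type(low, high, speeds);
}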
2836 
2837 /**
2838  * ice_aq_set_phy_cfg
2839  * @hw: pointer to the HW struct
2840  * @pi: port info structure of the interested logical port
2841  * @cfg: structure with PHY configuration data to be set
2842  * @cd: pointer to command details structure or NULL
2843  *
2844  * Set the various PHY configuration parameters supported on the Port.
2845  * One or more of the Set PHY config parameters may be ignored in an MFP
2846  * mode as the PF may not have the privilege to set some of the PHY Config
2847  * parameters. This status will be indicated by the command response (0x0601).
2848  */
2849 enum ice_status
2850 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2851 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2852 {
2853 	struct ice_aq_desc desc;
2854 	enum ice_status status;
2855 
2856 	if (!cfg)
2857 		return ICE_ERR_PARAM;
2858 
2859 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2860 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2861 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2862 			  cfg->caps);
2863 
2864 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2865 	}
2866 
2867 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2868 	desc.params.set_phy.lport_num = pi->lport;
2869 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2870 
2871 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2872 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
2873 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2874 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
2875 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2876 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
2877 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
2878 		  cfg->low_power_ctrl_an);
2879 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
2880 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
2881 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
2882 		  cfg->link_fec_opt);
2883 
2884 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2885 
2886 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2887 		status = ICE_SUCCESS;
2888 
2889 	if (!status)
2890 		pi->phy.curr_user_phy_cfg = *cfg;
2891 
2892 	return status;
2893 }
2894 
2895 /**
2896  * ice_update_link_info - update status of the HW network link
2897  * @pi: port info structure of the interested logical port
2898  */
2899 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2900 {
2901 	struct ice_link_status *li;
2902 	enum ice_status status;
2903 
2904 	if (!pi)
2905 		return ICE_ERR_PARAM;
2906 
2907 	li = &pi->phy.link_info;
2908 
2909 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2910 	if (status)
2911 		return status;
2912 
2913 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2914 		struct ice_aqc_get_phy_caps_data *pcaps;
2915 		struct ice_hw *hw;
2916 
2917 		hw = pi->hw;
2918 		pcaps = (struct ice_aqc_get_phy_caps_data *)
2919 			ice_malloc(hw, sizeof(*pcaps));
2920 		if (!pcaps)
2921 			return ICE_ERR_NO_MEMORY;
2922 
2923 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2924 					     pcaps, NULL);
2925 
2926 		if (status == ICE_SUCCESS)
2927 			ice_memcpy(li->module_type, &pcaps->module_type,
2928 				   sizeof(li->module_type),
2929 				   ICE_NONDMA_TO_NONDMA);
2930 
2931 		ice_free(hw, pcaps);
2932 	}
2933 
2934 	return status;
2935 }
2936 
2937 /**
2938  * ice_cache_phy_user_req
2939  * @pi: port information structure
2940  * @cache_data: PHY logging data
2941  * @cache_mode: PHY logging mode
2942  *
2943  * Log the user request for (FC, FEC, SPEED) for later use.
2944  */
2945 static void
2946 ice_cache_phy_user_req(struct ice_port_info *pi,
2947 		       struct ice_phy_cache_mode_data cache_data,
2948 		       enum ice_phy_cache_mode cache_mode)
2949 {
2950 	if (!pi)
2951 		return;
2952 
2953 	switch (cache_mode) {
2954 	case ICE_FC_MODE:
2955 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2956 		break;
2957 	case ICE_SPEED_MODE:
2958 		pi->phy.curr_user_speed_req =
2959 			cache_data.data.curr_user_speed_req;
2960 		break;
2961 	case ICE_FEC_MODE:
2962 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2963 		break;
2964 	default:
2965 		break;
2966 	}
2967 }
2968 
2969 /**
2970  * ice_caps_to_fc_mode
2971  * @caps: PHY capabilities
2972  *
2973  * Convert PHY FC capabilities to ice FC mode
2974  */
2975 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2976 {
2977 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2978 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2979 		return ICE_FC_FULL;
2980 
2981 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2982 		return ICE_FC_TX_PAUSE;
2983 
2984 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2985 		return ICE_FC_RX_PAUSE;
2986 
2987 	return ICE_FC_NONE;
2988 }
2989 
2990 /**
2991  * ice_caps_to_fec_mode
2992  * @caps: PHY capabilities
2993  * @fec_options: Link FEC options
2994  *
2995  * Convert PHY FEC capabilities to ice FEC mode
2996  */
2997 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2998 {
2999 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3000 		return ICE_FEC_AUTO;
3001 
3002 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3003 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3004 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3005 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3006 		return ICE_FEC_BASER;
3007 
3008 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3009 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3010 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3011 		return ICE_FEC_RS;
3012 
3013 	return ICE_FEC_NONE;
3014 }
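
/*
 * Usage sketch (illustrative, not part of the driver): deriving the
 * driver's FC and FEC modes from a previously fetched
 * ice_aqc_get_phy_caps_data. The function name is hypothetical.
 */
static void
ice_example_caps_to_modes(struct ice_aqc_get_phy_caps_data *pcaps)
{
	enum ice_fc_mode fc;
	enum ice_fec_mode fec;

	fc = ice_caps_to_fc_mode(pcaps->caps);
	fec = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options);

	/* fc is one of ICE_FC_FULL, ICE_FC_TX_PAUSE, ICE_FC_RX_PAUSE or
	 * ICE_FC_NONE; fec is one of ICE_FEC_AUTO, ICE_FEC_BASER,
	 * ICE_FEC_RS or ICE_FEC_NONE.
	 */
	(void)fc;
	(void)fec;
}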
3015 
3016 /**
3017  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3018  * @pi: port information structure
3019  * @cfg: PHY configuration data to set FC mode
3020  * @req_mode: FC mode to configure
3021  */
3022 static enum ice_status
3023 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3024 	       enum ice_fc_mode req_mode)
3025 {
3026 	struct ice_phy_cache_mode_data cache_data;
3027 	u8 pause_mask = 0x0;
3028 
3029 	if (!pi || !cfg)
3030 		return ICE_ERR_BAD_PTR;
3031 	switch (req_mode) {
3032 	case ICE_FC_AUTO:
3033 	{
3034 		struct ice_aqc_get_phy_caps_data *pcaps;
3035 		enum ice_status status;
3036 
3037 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3038 			ice_malloc(pi->hw, sizeof(*pcaps));
3039 		if (!pcaps)
3040 			return ICE_ERR_NO_MEMORY;
3041 		/* Query the value of FC that both the NIC and attached media
3042 		 * can do.
3043 		 */
3044 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3045 					     pcaps, NULL);
3046 		if (status) {
3047 			ice_free(pi->hw, pcaps);
3048 			return status;
3049 		}
3050 
3051 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3052 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3053 
3054 		ice_free(pi->hw, pcaps);
3055 		break;
3056 	}
3057 	case ICE_FC_FULL:
3058 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3059 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3060 		break;
3061 	case ICE_FC_RX_PAUSE:
3062 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3063 		break;
3064 	case ICE_FC_TX_PAUSE:
3065 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3066 		break;
3067 	default:
3068 		break;
3069 	}
3070 
3071 	/* clear the old pause settings */
3072 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3073 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3074 
3075 	/* set the new capabilities */
3076 	cfg->caps |= pause_mask;
3077 
3078 	/* Cache user FC request */
3079 	cache_data.data.curr_user_fc_req = req_mode;
3080 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3081 
3082 	return ICE_SUCCESS;
3083 }
3084 
3085 /**
3086  * ice_set_fc
3087  * @pi: port information structure
3088  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3089  * @ena_auto_link_update: enable automatic link update
3090  *
3091  * Set the requested flow control mode.
3092  */
3093 enum ice_status
3094 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3095 {
3096 	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
3097 	struct ice_aqc_get_phy_caps_data *pcaps;
3098 	enum ice_status status;
3099 	struct ice_hw *hw;
3100 
3101 	if (!pi || !aq_failures)
3102 		return ICE_ERR_BAD_PTR;
3103 
3104 	*aq_failures = 0;
3105 	hw = pi->hw;
3106 
3107 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3108 		ice_malloc(hw, sizeof(*pcaps));
3109 	if (!pcaps)
3110 		return ICE_ERR_NO_MEMORY;
3111 
3112 	/* Get the current PHY config */
3113 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3114 				     pcaps, NULL);
3115 
3116 	if (status) {
3117 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3118 		goto out;
3119 	}
3120 
3121 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3122 
3123 	/* Configure the set PHY data */
3124 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3125 	if (status) {
3126 		if (status != ICE_ERR_BAD_PTR)
3127 			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3128 
3129 		goto out;
3130 	}
3131 
3132 	/* If the capabilities have changed, then set the new config */
3133 	if (cfg.caps != pcaps->caps) {
3134 		int retry_count, retry_max = 10;
3135 
3136 		/* Auto restart link so settings take effect */
3137 		if (ena_auto_link_update)
3138 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3139 
3140 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3141 		if (status) {
3142 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3143 			goto out;
3144 		}
3145 
3146 		/* Update the link info
3147 		 * It sometimes takes a really long time for link to
3148 		 * come back from the atomic reset. Thus, we wait a
3149 		 * little bit.
3150 		 */
3151 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3152 			status = ice_update_link_info(pi);
3153 
3154 			if (status == ICE_SUCCESS)
3155 				break;
3156 
3157 			ice_msec_delay(100, true);
3158 		}
3159 
3160 		if (status)
3161 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3162 	}
3163 
3164 out:
3165 	ice_free(hw, pcaps);
3166 	return status;
3167 }
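
/*
 * Usage sketch (illustrative, not part of the driver): requesting full
 * flow control and letting ice_set_fc() restart the link so the change
 * takes effect. The function name is hypothetical.
 */
static enum ice_status ice_example_enable_full_fc(struct ice_port_info *pi)
{
	enum ice_status status;
	u8 aq_failures = 0;

	pi->fc.req_mode = ICE_FC_FULL;
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		/* aq_failures narrows down which AQ step failed: the get,
		 * the set, or the post-set link update.
		 */
		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, fail mask %d\n",
			  aq_failures);
	return status;
}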
3168 
3169 /**
3170  * ice_phy_caps_equals_cfg
3171  * @phy_caps: PHY capabilities
3172  * @phy_cfg: PHY configuration
3173  *
3174  * Helper function to determine if the PHY capabilities match the PHY
3175  * configuration
3176  */
3177 bool
3178 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3179 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3180 {
3181 	u8 caps_mask, cfg_mask;
3182 
3183 	if (!phy_caps || !phy_cfg)
3184 		return false;
3185 
3186 	/* These bits are not common between capabilities and configuration.
3187 	 * Do not use them to determine equality.
3188 	 */
3189 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3190 					      ICE_AQC_PHY_EN_MOD_QUAL);
3191 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3192 
3193 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3194 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3195 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3196 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3197 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3198 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3199 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3200 		return false;
3201 
3202 	return true;
3203 }
3204 
3205 /**
3206  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3207  * @pi: port information structure
3208  * @caps: PHY ability structure to copy data from
3209  * @cfg: PHY configuration structure to copy data to
3210  *
3211  * Helper function to copy AQC PHY get ability data to PHY set configuration
3212  * data structure
3213  */
3214 void
3215 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3216 			 struct ice_aqc_get_phy_caps_data *caps,
3217 			 struct ice_aqc_set_phy_cfg_data *cfg)
3218 {
3219 	if (!pi || !caps || !cfg)
3220 		return;
3221 
3222 	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3223 	cfg->phy_type_low = caps->phy_type_low;
3224 	cfg->phy_type_high = caps->phy_type_high;
3225 	cfg->caps = caps->caps;
3226 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3227 	cfg->eee_cap = caps->eee_cap;
3228 	cfg->eeer_value = caps->eeer_value;
3229 	cfg->link_fec_opt = caps->link_fec_options;
3230 	cfg->module_compliance_enforcement =
3231 		caps->module_compliance_enforcement;
3232 }
3233 
3234 /**
3235  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3236  * @pi: port information structure
3237  * @cfg: PHY configuration data to set FEC mode
3238  * @fec: FEC mode to configure
3239  */
3240 enum ice_status
3241 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3242 		enum ice_fec_mode fec)
3243 {
3244 	struct ice_aqc_get_phy_caps_data *pcaps;
3245 	enum ice_status status = ICE_SUCCESS;
3246 	struct ice_hw *hw;
3247 
3248 	if (!pi || !cfg)
3249 		return ICE_ERR_BAD_PTR;
3250 
3251 	hw = pi->hw;
3252 
3253 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3254 		ice_malloc(hw, sizeof(*pcaps));
3255 	if (!pcaps)
3256 		return ICE_ERR_NO_MEMORY;
3257 
3258 	status = ice_aq_get_phy_caps(pi, false,
3259 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3260 				      ICE_AQC_REPORT_DFLT_CFG :
3261 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3262 
3263 	if (status)
3264 		goto out;
3265 
3266 	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3267 	cfg->link_fec_opt = pcaps->link_fec_options;
3268 
3269 	switch (fec) {
3270 	case ICE_FEC_BASER:
3271 		/* Clear the RS bits, AND in the BASE-R ability
3272 		 * bits, and OR in the request bits.
3273 		 */
3274 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3275 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3276 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3277 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3278 		break;
3279 	case ICE_FEC_RS:
3280 		/* Clear the BASE-R bits, AND in the RS ability
3281 		 * bits, and OR in the request bits.
3282 		 */
3283 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3284 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3285 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3286 		break;
3287 	case ICE_FEC_NONE:
3288 		/* Clear all FEC option bits. */
3289 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3290 		break;
3291 	case ICE_FEC_AUTO:
3292 		/* Keep the valid caps bits and OR in all supported FEC options. */
3293 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3294 		cfg->link_fec_opt |= pcaps->link_fec_options;
3295 		break;
3296 	default:
3297 		status = ICE_ERR_PARAM;
3298 		break;
3299 	}
3300 
3301 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3302 	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3303 		struct ice_link_default_override_tlv tlv;
3304 
3305 		if (ice_get_link_default_override(&tlv, pi))
3306 			goto out;
3307 
3308 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3309 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3310 			cfg->link_fec_opt = tlv.fec_options;
3311 	}
3312 
3313 out:
3314 	ice_free(hw, pcaps);
3315 
3316 	return status;
3317 }
3318 
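/*
 * Example usage (a minimal sketch, not part of the driver): request RS-FEC
 * on a port by reading the active configuration, copying it into a
 * set-config request, and overriding the FEC fields. The helper name is
 * hypothetical, and it assumes ice_aq_set_phy_cfg() and
 * ICE_AQC_REPORT_ACTIVE_CFG as declared elsewhere in this driver.
 */
static enum ice_status
ice_example_force_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* start from the currently active PHY configuration */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (!status) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	}
	if (!status)
		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);

	ice_free(hw, pcaps);
	return status;
}
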
3319 /**
3320  * ice_get_link_status - get status of the HW network link
3321  * @pi: port information structure
3322  * @link_up: pointer to bool (true/false = linkup/linkdown)
3323  *
3324  * On success, *link_up is true if the link is up and false if it is
3325  * down; the value is invalid if the returned status is non-zero. As a
3326  * result of this call, link status reporting becomes enabled.
3327  */
3328 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3329 {
3330 	struct ice_phy_info *phy_info;
3331 	enum ice_status status = ICE_SUCCESS;
3332 
3333 	if (!pi || !link_up)
3334 		return ICE_ERR_PARAM;
3335 
3336 	phy_info = &pi->phy;
3337 
3338 	if (phy_info->get_link_info) {
3339 		status = ice_update_link_info(pi);
3340 
3341 		if (status)
3342 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3343 				  status);
3344 	}
3345 
3346 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3347 
3348 	return status;
3349 }
3350 
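/*
 * Example usage (a minimal sketch; the helper name is hypothetical): poll
 * the current link state and log it, using only APIs defined in this file.
 */
static void
ice_example_log_link(struct ice_hw *hw)
{
	bool link_up;

	if (ice_get_link_status(hw->port_info, &link_up) == ICE_SUCCESS)
		ice_debug(hw, ICE_DBG_LINK, "link is %s\n",
			  link_up ? "up" : "down");
}
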
3351 /**
3352  * ice_aq_set_link_restart_an
3353  * @pi: pointer to the port information structure
3354  * @ena_link: if true: enable link, if false: disable link
3355  * @cd: pointer to command details structure or NULL
3356  *
3357  * Sets up the link and restarts the Auto-Negotiation over the link.
3358  */
3359 enum ice_status
3360 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3361 			   struct ice_sq_cd *cd)
3362 {
3363 	struct ice_aqc_restart_an *cmd;
3364 	struct ice_aq_desc desc;
3365 
3366 	cmd = &desc.params.restart_an;
3367 
3368 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3369 
3370 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3371 	cmd->lport_num = pi->lport;
3372 	if (ena_link)
3373 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3374 	else
3375 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3376 
3377 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3378 }
3379 
3380 /**
3381  * ice_aq_set_event_mask
3382  * @hw: pointer to the HW struct
3383  * @port_num: port number of the physical function
3384  * @mask: event mask to be set
3385  * @cd: pointer to command details structure or NULL
3386  *
3387  * Set event mask (0x0613)
3388  */
3389 enum ice_status
3390 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3391 		      struct ice_sq_cd *cd)
3392 {
3393 	struct ice_aqc_set_event_mask *cmd;
3394 	struct ice_aq_desc desc;
3395 
3396 	cmd = &desc.params.set_event_mask;
3397 
3398 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3399 
3400 	cmd->lport_num = port_num;
3401 
3402 	cmd->event_mask = CPU_TO_LE16(mask);
3403 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3404 }
3405 
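/*
 * Example usage (a minimal sketch; the helper name is hypothetical):
 * suppress every link event except link up/down transitions. This assumes
 * the convention that a set bit in the mask disables the corresponding
 * event, and that ICE_AQ_LINK_EVENT_UPDOWN is defined in ice_adminq_cmd.h.
 */
static enum ice_status
ice_example_mask_link_events(struct ice_hw *hw)
{
	u16 mask = ~(u16)ICE_AQ_LINK_EVENT_UPDOWN;

	return ice_aq_set_event_mask(hw, hw->port_info->lport, mask, NULL);
}
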
3406 /**
3407  * ice_aq_set_mac_loopback
3408  * @hw: pointer to the HW struct
3409  * @ena_lpbk: Enable or Disable loopback
3410  * @cd: pointer to command details structure or NULL
3411  *
3412  * Enable/disable loopback on a given port
3413  */
3414 enum ice_status
3415 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3416 {
3417 	struct ice_aqc_set_mac_lb *cmd;
3418 	struct ice_aq_desc desc;
3419 
3420 	cmd = &desc.params.set_mac_lb;
3421 
3422 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3423 	if (ena_lpbk)
3424 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3425 
3426 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3427 }
3428 
3429 /**
3430  * ice_aq_set_port_id_led
3431  * @pi: pointer to the port information
3432  * @is_orig_mode: is this LED set to original mode (by the netlist)
3433  * @cd: pointer to command details structure or NULL
3434  *
3435  * Set LED value for the given port (0x06e9)
3436  */
3437 enum ice_status
3438 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3439 		       struct ice_sq_cd *cd)
3440 {
3441 	struct ice_aqc_set_port_id_led *cmd;
3442 	struct ice_hw *hw = pi->hw;
3443 	struct ice_aq_desc desc;
3444 
3445 	cmd = &desc.params.set_port_id_led;
3446 
3447 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3448 
3449 	if (is_orig_mode)
3450 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3451 	else
3452 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3453 
3454 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3455 }
3456 
3457 /**
3458  * ice_aq_sff_eeprom
3459  * @hw: pointer to the HW struct
3460  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3461  * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0=topo default)
3462  * @mem_addr: I2C offset; lower 8 bits are the address, upper 8 must be zero.
3463  * @page: QSFP page
3464  * @set_page: set or ignore the page
3465  * @data: pointer to data buffer to be read/written to the I2C device.
3466  * @length: 1-16 for read, 1 for write.
3467  * @write: false for a read, true for a write.
3468  * @cd: pointer to command details structure or NULL
3469  *
3470  * Read/Write SFF EEPROM (0x06EE)
3471  */
3472 enum ice_status
3473 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3474 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3475 		  bool write, struct ice_sq_cd *cd)
3476 {
3477 	struct ice_aqc_sff_eeprom *cmd;
3478 	struct ice_aq_desc desc;
3479 	enum ice_status status;
3480 
3481 	if (!data || (mem_addr & 0xff00))
3482 		return ICE_ERR_PARAM;
3483 
3484 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3485 	cmd = &desc.params.read_write_sff_param;
3486 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3487 	cmd->lport_num = (u8)(lport & 0xff);
3488 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3489 	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3490 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3491 					((set_page <<
3492 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3493 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3494 	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3495 	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3496 	if (write)
3497 		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3498 
3499 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3500 	return status;
3501 }
3502 
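/*
 * Example usage (a minimal sketch; the helper name is hypothetical): read
 * the one-byte module identifier at offset 0 of an SFP/QSFP EEPROM. This
 * assumes lport 0 with the valid bit clear selects the default port, and
 * that set_page = 0 means "do not change the page".
 */
static enum ice_status
ice_example_read_sff_id(struct ice_hw *hw, u8 *id)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, id,
				 sizeof(*id), false, NULL);
}
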
3503 /**
3504  * __ice_aq_get_set_rss_lut
3505  * @hw: pointer to the hardware structure
3506  * @params: RSS LUT parameters
3507  * @set: set true to set the table, false to get the table
3508  *
3509  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3510  */
3511 static enum ice_status
3512 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3513 {
3514 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3515 	struct ice_aqc_get_set_rss_lut *cmd_resp;
3516 	struct ice_aq_desc desc;
3517 	enum ice_status status;
3518 	u8 *lut;
3519 
3520 	if (!params)
3521 		return ICE_ERR_PARAM;
3522 
3523 	vsi_handle = params->vsi_handle;
3524 	lut = params->lut;
3525 
3526 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3527 		return ICE_ERR_PARAM;
3528 
3529 	lut_size = params->lut_size;
3530 	lut_type = params->lut_type;
3531 	glob_lut_idx = params->global_lut_id;
3532 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3533 
3534 	cmd_resp = &desc.params.get_set_rss_lut;
3535 
3536 	if (set) {
3537 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3538 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3539 	} else {
3540 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3541 	}
3542 
3543 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3544 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3545 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3546 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3547 
3548 	switch (lut_type) {
3549 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3550 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3551 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3552 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3553 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3554 		break;
3555 	default:
3556 		status = ICE_ERR_PARAM;
3557 		goto ice_aq_get_set_rss_lut_exit;
3558 	}
3559 
3560 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3561 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3562 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3563 
3564 		if (!set)
3565 			goto ice_aq_get_set_rss_lut_send;
3566 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3567 		if (!set)
3568 			goto ice_aq_get_set_rss_lut_send;
3569 	} else {
3570 		goto ice_aq_get_set_rss_lut_send;
3571 	}
3572 
3573 	/* LUT size is only valid for Global and PF table types */
3574 	switch (lut_size) {
3575 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3576 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3577 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3578 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3579 		break;
3580 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3581 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3582 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3583 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3584 		break;
3585 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3586 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3587 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3588 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3589 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3590 			break;
3591 		}
3592 		/* fall-through */
3593 	default:
3594 		status = ICE_ERR_PARAM;
3595 		goto ice_aq_get_set_rss_lut_exit;
3596 	}
3597 
3598 ice_aq_get_set_rss_lut_send:
3599 	cmd_resp->flags = CPU_TO_LE16(flags);
3600 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3601 
3602 ice_aq_get_set_rss_lut_exit:
3603 	return status;
3604 }
3605 
3606 /**
3607  * ice_aq_get_rss_lut
3608  * @hw: pointer to the hardware structure
3609  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3610  *
3611  * get the RSS lookup table, PF or VSI type
3612  */
3613 enum ice_status
3614 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3615 {
3616 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
3617 }
3618 
3619 /**
3620  * ice_aq_set_rss_lut
3621  * @hw: pointer to the hardware structure
3622  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3623  *
3624  * set the RSS lookup table, PF or VSI type
3625  */
3626 enum ice_status
3627 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3628 {
3629 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
3630 }
3631 
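/*
 * Example usage (a minimal sketch; the helper name is hypothetical):
 * program a 512-entry PF LUT that spreads hash buckets round-robin across
 * the first num_rxqs queues. The caller's lut buffer must hold at least
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 bytes.
 */
static enum ice_status
ice_example_set_pf_lut(struct ice_hw *hw, u16 vsi_handle, u8 *lut,
		       u16 num_rxqs)
{
	struct ice_aq_get_set_rss_lut_params params = { 0 };
	u16 i;

	for (i = 0; i < ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512; i++)
		lut[i] = (u8)(i % num_rxqs);

	params.vsi_handle = vsi_handle;
	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
	params.lut = lut;

	return ice_aq_set_rss_lut(hw, &params);
}
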
3632 /**
3633  * __ice_aq_get_set_rss_key
3634  * @hw: pointer to the HW struct
3635  * @vsi_id: VSI FW index
3636  * @key: pointer to key info struct
3637  * @set: set true to set the key, false to get the key
3638  *
3639  * get (0x0B04) or set (0x0B02) the RSS key per VSI
3640  */
3641 static enum ice_status
3642 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3643 			 struct ice_aqc_get_set_rss_keys *key,
3644 			 bool set)
3645 {
3646 	struct ice_aqc_get_set_rss_key *cmd_resp;
3647 	u16 key_size = sizeof(*key);
3648 	struct ice_aq_desc desc;
3649 
3650 	cmd_resp = &desc.params.get_set_rss_key;
3651 
3652 	if (set) {
3653 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3654 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3655 	} else {
3656 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3657 	}
3658 
3659 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3660 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3661 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3662 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3663 
3664 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3665 }
3666 
3667 /**
3668  * ice_aq_get_rss_key
3669  * @hw: pointer to the HW struct
3670  * @vsi_handle: software VSI handle
3671  * @key: pointer to key info struct
3672  *
3673  * get the RSS key per VSI
3674  */
3675 enum ice_status
3676 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3677 		   struct ice_aqc_get_set_rss_keys *key)
3678 {
3679 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3680 		return ICE_ERR_PARAM;
3681 
3682 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3683 					key, false);
3684 }
3685 
3686 /**
3687  * ice_aq_set_rss_key
3688  * @hw: pointer to the HW struct
3689  * @vsi_handle: software VSI handle
3690  * @keys: pointer to key info struct
3691  *
3692  * set the RSS key per VSI
3693  */
3694 enum ice_status
3695 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3696 		   struct ice_aqc_get_set_rss_keys *keys)
3697 {
3698 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3699 		return ICE_ERR_PARAM;
3700 
3701 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3702 					keys, true);
3703 }
3704 
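/*
 * Example usage (a minimal sketch; the helper name is hypothetical): load
 * a caller-supplied seed into the standard RSS key for a VSI. This assumes
 * the standard_rss_key member of struct ice_aqc_get_set_rss_keys and a
 * seed buffer at least that large.
 */
static enum ice_status
ice_example_set_rss_key(struct ice_hw *hw, u16 vsi_handle, const u8 *seed)
{
	struct ice_aqc_get_set_rss_keys keys;

	ice_memset(&keys, 0, sizeof(keys), ICE_NONDMA_MEM);
	ice_memcpy(keys.standard_rss_key, seed,
		   sizeof(keys.standard_rss_key), ICE_NONDMA_TO_NONDMA);

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}
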
3705 /**
3706  * ice_aq_add_lan_txq
3707  * @hw: pointer to the hardware structure
3708  * @num_qgrps: Number of added queue groups
3709  * @qg_list: list of queue groups to be added
3710  * @buf_size: size of buffer for indirect command
3711  * @cd: pointer to command details structure or NULL
3712  *
3713  * Add Tx LAN queue (0x0C30)
3714  *
3715  * NOTE:
3716  * Prior to calling add Tx LAN queue, initialize the following as part
3717  * of the Tx queue context: the completion queue ID (if the queue uses a
3718  * completion queue), the Quanta profile, the Cache profile, and the
3719  * Packet shaper profile.
3720  *
3721  * After the add Tx LAN queue AQ command completes, interrupts should be
3722  * associated with the specific queues, and note that associating a Tx
3723  * queue with a doorbell queue is not part of the add Tx LAN queue
3724  * flow.
3725  */
3726 enum ice_status
3727 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3728 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3729 		   struct ice_sq_cd *cd)
3730 {
3731 	struct ice_aqc_add_tx_qgrp *list;
3732 	struct ice_aqc_add_txqs *cmd;
3733 	struct ice_aq_desc desc;
3734 	u16 i, sum_size = 0;
3735 
3736 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3737 
3738 	cmd = &desc.params.add_txqs;
3739 
3740 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3741 
3742 	if (!qg_list)
3743 		return ICE_ERR_PARAM;
3744 
3745 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3746 		return ICE_ERR_PARAM;
3747 
3748 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
3749 		sum_size += ice_struct_size(list, txqs, list->num_txqs);
3750 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3751 						      list->num_txqs);
3752 	}
3753 
3754 	if (buf_size != sum_size)
3755 		return ICE_ERR_PARAM;
3756 
3757 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3758 
3759 	cmd->num_qgrps = num_qgrps;
3760 
3761 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3762 }
3763 
3764 /**
3765  * ice_aq_dis_lan_txq
3766  * @hw: pointer to the hardware structure
3767  * @num_qgrps: number of groups in the list
3768  * @qg_list: the list of groups to disable
3769  * @buf_size: the total size of the qg_list buffer in bytes
3770  * @rst_src: if called due to reset, specifies the reset source
3771  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3772  * @cd: pointer to command details structure or NULL
3773  *
3774  * Disable LAN Tx queue (0x0C31)
3775  */
3776 static enum ice_status
3777 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3778 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3779 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
3780 		   struct ice_sq_cd *cd)
3781 {
3782 	struct ice_aqc_dis_txq_item *item;
3783 	struct ice_aqc_dis_txqs *cmd;
3784 	struct ice_aq_desc desc;
3785 	enum ice_status status;
3786 	u16 i, sz = 0;
3787 
3788 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3789 	cmd = &desc.params.dis_txqs;
3790 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3791 
3792 	/* qg_list can be NULL only in VM/VF reset flow */
3793 	if (!qg_list && !rst_src)
3794 		return ICE_ERR_PARAM;
3795 
3796 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3797 		return ICE_ERR_PARAM;
3798 
3799 	cmd->num_entries = num_qgrps;
3800 
3801 	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3802 					    ICE_AQC_Q_DIS_TIMEOUT_M);
3803 
3804 	switch (rst_src) {
3805 	case ICE_VM_RESET:
3806 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3807 		cmd->vmvf_and_timeout |=
3808 			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3809 		break;
3810 	case ICE_VF_RESET:
3811 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3812 		/* In this case, FW expects vmvf_num to be absolute VF ID */
3813 		cmd->vmvf_and_timeout |=
3814 			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
3815 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
3816 		break;
3817 	case ICE_NO_RESET:
3818 	default:
3819 		break;
3820 	}
3821 
3822 	/* flush pipe on time out */
3823 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3824 	/* If no queue group info, we are in a reset flow. Issue the AQ */
3825 	if (!qg_list)
3826 		goto do_aq;
3827 
3828 	/* set RD bit to indicate that command buffer is provided by the driver
3829 	 * and it needs to be read by the firmware
3830 	 */
3831 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3832 
3833 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
3834 		u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3835 
3836 		/* If the num of queues is even, add 2 bytes of padding */
3837 		if ((item->num_qs % 2) == 0)
3838 			item_size += 2;
3839 
3840 		sz += item_size;
3841 
3842 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3843 	}
3844 
3845 	if (buf_size != sz)
3846 		return ICE_ERR_PARAM;
3847 
3848 do_aq:
3849 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3850 	if (status) {
3851 		if (!qg_list)
3852 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3853 				  vmvf_num, hw->adminq.sq_last_status);
3854 		else
3855 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3856 				  LE16_TO_CPU(qg_list[0].q_id[0]),
3857 				  hw->adminq.sq_last_status);
3858 	}
3859 	return status;
3860 }
3861 
3862 /**
3863  * ice_aq_move_recfg_lan_txq
3864  * @hw: pointer to the hardware structure
3865  * @num_qs: number of queues to move/reconfigure
3866  * @is_move: true if this operation involves node movement
3867  * @is_tc_change: true if this operation involves a TC change
3868  * @subseq_call: true if this operation is a subsequent call
3869  * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3870  * @timeout: timeout in units of 100 usec (valid values 0-50)
3871  * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3872  * @buf: struct containing src/dest TEID and per-queue info
3873  * @buf_size: size of buffer for indirect command
3874  * @txqs_moved: out param, number of queues successfully moved
3875  * @cd: pointer to command details structure or NULL
3876  *
3877  * Move / Reconfigure Tx LAN queues (0x0C32)
3878  */
3879 enum ice_status
3880 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3881 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
3882 			  u8 timeout, u32 *blocked_cgds,
3883 			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3884 			  u8 *txqs_moved, struct ice_sq_cd *cd)
3885 {
3886 	struct ice_aqc_move_txqs *cmd;
3887 	struct ice_aq_desc desc;
3888 	enum ice_status status;
3889 
3890 	cmd = &desc.params.move_txqs;
3891 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3892 
3893 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3894 	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3895 		return ICE_ERR_PARAM;
3896 
3897 	if (is_tc_change && !flush_pipe && !blocked_cgds)
3898 		return ICE_ERR_PARAM;
3899 
3900 	if (!is_move && !is_tc_change)
3901 		return ICE_ERR_PARAM;
3902 
3903 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3904 
3905 	if (is_move)
3906 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3907 
3908 	if (is_tc_change)
3909 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3910 
3911 	if (subseq_call)
3912 		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3913 
3914 	if (flush_pipe)
3915 		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3916 
3917 	cmd->num_qs = num_qs;
3918 	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3919 			ICE_AQC_Q_CMD_TIMEOUT_M);
3920 
3921 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3922 
3923 	if (!status && txqs_moved)
3924 		*txqs_moved = cmd->num_qs;
3925 
3926 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3927 	    is_tc_change && !flush_pipe)
3928 		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3929 
3930 	return status;
3931 }
3932 
3933 /* End of FW Admin Queue command wrappers */
3934 
3935 /**
3936  * ice_write_byte - write a byte to a packed context structure
3937  * @src_ctx:  the context structure to read from
3938  * @dest_ctx: the context to be written to
3939  * @ce_info:  a description of the struct to be filled
3940  */
3941 static void
3942 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3943 {
3944 	u8 src_byte, dest_byte, mask;
3945 	u8 *from, *dest;
3946 	u16 shift_width;
3947 
3948 	/* copy from the next struct field */
3949 	from = src_ctx + ce_info->offset;
3950 
3951 	/* prepare the bits and mask */
3952 	shift_width = ce_info->lsb % 8;
3953 	mask = (u8)(BIT(ce_info->width) - 1);
3954 
3955 	src_byte = *from;
3956 	src_byte &= mask;
3957 
3958 	/* shift to correct alignment */
3959 	mask <<= shift_width;
3960 	src_byte <<= shift_width;
3961 
3962 	/* get the current bits from the target bit string */
3963 	dest = dest_ctx + (ce_info->lsb / 8);
3964 
3965 	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3966 
3967 	dest_byte &= ~mask;	/* get the bits not changing */
3968 	dest_byte |= src_byte;	/* add in the new bits */
3969 
3970 	/* put it all back */
3971 	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3972 }
3973 
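/*
 * Worked example (a sketch of the arithmetic above): for a field with
 * width = 3 and lsb = 10, shift_width = 10 % 8 = 2 and mask = 0b00000111.
 * A source value of 0b101 becomes src_byte = 0b00010100 after the shift,
 * the mask becomes 0b00011100, and both are applied to destination byte
 * 10 / 8 = 1, leaving that byte's surrounding bits untouched.
 */
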
3974 /**
3975  * ice_write_word - write a word to a packed context structure
3976  * @src_ctx:  the context structure to read from
3977  * @dest_ctx: the context to be written to
3978  * @ce_info:  a description of the struct to be filled
3979  */
3980 static void
3981 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3982 {
3983 	u16 src_word, mask;
3984 	__le16 dest_word;
3985 	u8 *from, *dest;
3986 	u16 shift_width;
3987 
3988 	/* copy from the next struct field */
3989 	from = src_ctx + ce_info->offset;
3990 
3991 	/* prepare the bits and mask */
3992 	shift_width = ce_info->lsb % 8;
3993 	mask = BIT(ce_info->width) - 1;
3994 
3995 	/* don't swizzle the bits until after the mask because the mask bits
3996 	 * will be in a different bit position on big endian machines
3997 	 */
3998 	src_word = *(u16 *)from;
3999 	src_word &= mask;
4000 
4001 	/* shift to correct alignment */
4002 	mask <<= shift_width;
4003 	src_word <<= shift_width;
4004 
4005 	/* get the current bits from the target bit string */
4006 	dest = dest_ctx + (ce_info->lsb / 8);
4007 
4008 	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4009 
4010 	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
4011 	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
4012 
4013 	/* put it all back */
4014 	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4015 }
4016 
4017 /**
4018  * ice_write_dword - write a dword to a packed context structure
4019  * @src_ctx:  the context structure to read from
4020  * @dest_ctx: the context to be written to
4021  * @ce_info:  a description of the struct to be filled
4022  */
4023 static void
4024 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4025 {
4026 	u32 src_dword, mask;
4027 	__le32 dest_dword;
4028 	u8 *from, *dest;
4029 	u16 shift_width;
4030 
4031 	/* copy from the next struct field */
4032 	from = src_ctx + ce_info->offset;
4033 
4034 	/* prepare the bits and mask */
4035 	shift_width = ce_info->lsb % 8;
4036 
4037 	/* if the field width is exactly 32 on an x86 machine, then the shift
4038 	 * operation will not work because the SHL instruction's count is masked
4039 	 * to 5 bits, so the shift will do nothing
4040 	 */
4041 	if (ce_info->width < 32)
4042 		mask = BIT(ce_info->width) - 1;
4043 	else
4044 		mask = (u32)~0;
4045 
4046 	/* don't swizzle the bits until after the mask because the mask bits
4047 	 * will be in a different bit position on big endian machines
4048 	 */
4049 	src_dword = *(u32 *)from;
4050 	src_dword &= mask;
4051 
4052 	/* shift to correct alignment */
4053 	mask <<= shift_width;
4054 	src_dword <<= shift_width;
4055 
4056 	/* get the current bits from the target bit string */
4057 	dest = dest_ctx + (ce_info->lsb / 8);
4058 
4059 	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4060 
4061 	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
4062 	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
4063 
4064 	/* put it all back */
4065 	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4066 }
4067 
4068 /**
4069  * ice_write_qword - write a qword to a packed context structure
4070  * @src_ctx:  the context structure to read from
4071  * @dest_ctx: the context to be written to
4072  * @ce_info:  a description of the struct to be filled
4073  */
4074 static void
4075 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4076 {
4077 	u64 src_qword, mask;
4078 	__le64 dest_qword;
4079 	u8 *from, *dest;
4080 	u16 shift_width;
4081 
4082 	/* copy from the next struct field */
4083 	from = src_ctx + ce_info->offset;
4084 
4085 	/* prepare the bits and mask */
4086 	shift_width = ce_info->lsb % 8;
4087 
4088 	/* if the field width is exactly 64 on an x86 machine, then the shift
4089 	 * operation will not work because the SHL instruction's count is masked
4090 	 * to 6 bits, so the shift will do nothing
4091 	 */
4092 	if (ce_info->width < 64)
4093 		mask = BIT_ULL(ce_info->width) - 1;
4094 	else
4095 		mask = (u64)~0;
4096 
4097 	/* don't swizzle the bits until after the mask because the mask bits
4098 	 * will be in a different bit position on big endian machines
4099 	 */
4100 	src_qword = *(u64 *)from;
4101 	src_qword &= mask;
4102 
4103 	/* shift to correct alignment */
4104 	mask <<= shift_width;
4105 	src_qword <<= shift_width;
4106 
4107 	/* get the current bits from the target bit string */
4108 	dest = dest_ctx + (ce_info->lsb / 8);
4109 
4110 	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4111 
4112 	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
4113 	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
4114 
4115 	/* put it all back */
4116 	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4117 }
4118 
4119 /**
4120  * ice_set_ctx - set context bits in packed structure
4121  * @hw: pointer to the hardware structure
4122  * @src_ctx:  pointer to a generic non-packed context structure
4123  * @dest_ctx: pointer to memory for the packed structure
4124  * @ce_info:  a description of the structure to be transformed
4125  */
4126 enum ice_status
4127 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4128 	    const struct ice_ctx_ele *ce_info)
4129 {
4130 	int f;
4131 
4132 	for (f = 0; ce_info[f].width; f++) {
4133 		/* We have to deal with each element of the FW response
4134 		 * using the correct size so that we are correct regardless
4135 		 * of the endianness of the machine.
4136 		 */
4137 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4138 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4139 				  f, ce_info[f].width, ce_info[f].size_of);
4140 			continue;
4141 		}
4142 		switch (ce_info[f].size_of) {
4143 		case sizeof(u8):
4144 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4145 			break;
4146 		case sizeof(u16):
4147 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4148 			break;
4149 		case sizeof(u32):
4150 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4151 			break;
4152 		case sizeof(u64):
4153 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4154 			break;
4155 		default:
4156 			return ICE_ERR_INVAL_SIZE;
4157 		}
4158 	}
4159 
4160 	return ICE_SUCCESS;
4161 }
4162 
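/*
 * Example usage (a minimal sketch; the helper name is hypothetical): pack
 * an unpacked Tx LAN queue context into the context image carried by an
 * add-queues buffer. This assumes the ice_tlan_ctx_info[] descriptor
 * table, ICE_TLAN_CTX_BASE_S, and the base/qlen members of struct
 * ice_tlan_ctx as declared elsewhere in this driver.
 */
static enum ice_status
ice_example_pack_tlan_ctx(struct ice_hw *hw,
			  struct ice_aqc_add_tx_qgrp *qg_buf,
			  u64 ring_dma, u16 ring_len)
{
	struct ice_tlan_ctx tlan_ctx = { 0 };

	tlan_ctx.base = ring_dma >> ICE_TLAN_CTX_BASE_S;
	tlan_ctx.qlen = ring_len;

	return ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
			   ice_tlan_ctx_info);
}
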
4163 /**
4164  * ice_read_byte - read context byte into struct
4165  * @src_ctx:  the context structure to read from
4166  * @dest_ctx: the context to be written to
4167  * @ce_info:  a description of the struct to be filled
4168  */
4169 static void
4170 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4171 {
4172 	u8 dest_byte, mask;
4173 	u8 *src, *target;
4174 	u16 shift_width;
4175 
4176 	/* prepare the bits and mask */
4177 	shift_width = ce_info->lsb % 8;
4178 	mask = (u8)(BIT(ce_info->width) - 1);
4179 
4180 	/* shift to correct alignment */
4181 	mask <<= shift_width;
4182 
4183 	/* get the current bits from the src bit string */
4184 	src = src_ctx + (ce_info->lsb / 8);
4185 
4186 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4187 
4188 	dest_byte &= ~(mask);
4189 
4190 	dest_byte >>= shift_width;
4191 
4192 	/* get the address from the struct field */
4193 	target = dest_ctx + ce_info->offset;
4194 
4195 	/* put it back in the struct */
4196 	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4197 }
4198 
4199 /**
4200  * ice_read_word - read context word into struct
4201  * @src_ctx:  the context structure to read from
4202  * @dest_ctx: the context to be written to
4203  * @ce_info:  a description of the struct to be filled
4204  */
4205 static void
4206 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4207 {
4208 	u16 dest_word, mask;
4209 	u8 *src, *target;
4210 	__le16 src_word;
4211 	u16 shift_width;
4212 
4213 	/* prepare the bits and mask */
4214 	shift_width = ce_info->lsb % 8;
4215 	mask = BIT(ce_info->width) - 1;
4216 
4217 	/* shift to correct alignment */
4218 	mask <<= shift_width;
4219 
4220 	/* get the current bits from the src bit string */
4221 	src = src_ctx + (ce_info->lsb / 8);
4222 
4223 	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4224 
4225 	/* the data in the memory is stored as little endian so mask it
4226 	 * correctly
4227 	 */
4228 	src_word &= ~(CPU_TO_LE16(mask));
4229 
4230 	/* get the data back into host order before shifting */
4231 	dest_word = LE16_TO_CPU(src_word);
4232 
4233 	dest_word >>= shift_width;
4234 
4235 	/* get the address from the struct field */
4236 	target = dest_ctx + ce_info->offset;
4237 
4238 	/* put it back in the struct */
4239 	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4240 }
4241 
4242 /**
4243  * ice_read_dword - read context dword into struct
4244  * @src_ctx:  the context structure to read from
4245  * @dest_ctx: the context to be written to
4246  * @ce_info:  a description of the struct to be filled
4247  */
4248 static void
4249 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4250 {
4251 	u32 dest_dword, mask;
4252 	__le32 src_dword;
4253 	u8 *src, *target;
4254 	u16 shift_width;
4255 
4256 	/* prepare the bits and mask */
4257 	shift_width = ce_info->lsb % 8;
4258 
4259 	/* if the field width is exactly 32 on an x86 machine, then the shift
4260 	 * operation will not work because the SHL instruction's count is masked
4261 	 * to 5 bits, so the shift will do nothing
4262 	 */
4263 	if (ce_info->width < 32)
4264 		mask = BIT(ce_info->width) - 1;
4265 	else
4266 		mask = (u32)~0;
4267 
4268 	/* shift to correct alignment */
4269 	mask <<= shift_width;
4270 
4271 	/* get the current bits from the src bit string */
4272 	src = src_ctx + (ce_info->lsb / 8);
4273 
4274 	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4275 
4276 	/* the data in the memory is stored as little endian so mask it
4277 	 * correctly
4278 	 */
4279 	src_dword &= ~(CPU_TO_LE32(mask));
4280 
4281 	/* get the data back into host order before shifting */
4282 	dest_dword = LE32_TO_CPU(src_dword);
4283 
4284 	dest_dword >>= shift_width;
4285 
4286 	/* get the address from the struct field */
4287 	target = dest_ctx + ce_info->offset;
4288 
4289 	/* put it back in the struct */
4290 	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4291 }
4292 
4293 /**
4294  * ice_read_qword - read context qword into struct
4295  * @src_ctx:  the context structure to read from
4296  * @dest_ctx: the context to be written to
4297  * @ce_info:  a description of the struct to be filled
4298  */
4299 static void
4300 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4301 {
4302 	u64 dest_qword, mask;
4303 	__le64 src_qword;
4304 	u8 *src, *target;
4305 	u16 shift_width;
4306 
4307 	/* prepare the bits and mask */
4308 	shift_width = ce_info->lsb % 8;
4309 
4310 	/* if the field width is exactly 64 on an x86 machine, then the shift
4311 	 * operation will not work because the SHL instruction's count is masked
4312 	 * to 6 bits, so the shift will do nothing
4313 	 */
4314 	if (ce_info->width < 64)
4315 		mask = BIT_ULL(ce_info->width) - 1;
4316 	else
4317 		mask = (u64)~0;
4318 
4319 	/* shift to correct alignment */
4320 	mask <<= shift_width;
4321 
4322 	/* get the current bits from the src bit string */
4323 	src = src_ctx + (ce_info->lsb / 8);
4324 
4325 	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4326 
4327 	/* the data in the memory is stored as little endian so mask it
4328 	 * correctly
4329 	 */
4330 	src_qword &= ~(CPU_TO_LE64(mask));
4331 
4332 	/* get the data back into host order before shifting */
4333 	dest_qword = LE64_TO_CPU(src_qword);
4334 
4335 	dest_qword >>= shift_width;
4336 
4337 	/* get the address from the struct field */
4338 	target = dest_ctx + ce_info->offset;
4339 
4340 	/* put it back in the struct */
4341 	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4342 }
4343 
4344 /**
4345  * ice_get_ctx - extract context bits from a packed structure
4346  * @src_ctx:  pointer to a generic packed context structure
4347  * @dest_ctx: pointer to a generic non-packed context structure
4348  * @ce_info:  a description of the structure to be read from
4349  */
4350 enum ice_status
4351 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4352 {
4353 	int f;
4354 
4355 	for (f = 0; ce_info[f].width; f++) {
4356 		switch (ce_info[f].size_of) {
4357 		case 1:
4358 			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4359 			break;
4360 		case 2:
4361 			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4362 			break;
4363 		case 4:
4364 			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4365 			break;
4366 		case 8:
4367 			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4368 			break;
4369 		default:
4370 			/* nothing to do, just keep going */
4371 			break;
4372 		}
4373 	}
4374 
4375 	return ICE_SUCCESS;
4376 }
4377 
4378 /**
4379  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4380  * @hw: pointer to the HW struct
4381  * @vsi_handle: software VSI handle
4382  * @tc: TC number
4383  * @q_handle: software queue handle
4384  */
4385 struct ice_q_ctx *
4386 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4387 {
4388 	struct ice_vsi_ctx *vsi;
4389 	struct ice_q_ctx *q_ctx;
4390 
4391 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4392 	if (!vsi)
4393 		return NULL;
4394 	if (q_handle >= vsi->num_lan_q_entries[tc])
4395 		return NULL;
4396 	if (!vsi->lan_q_ctx[tc])
4397 		return NULL;
4398 	q_ctx = vsi->lan_q_ctx[tc];
4399 	return &q_ctx[q_handle];
4400 }
4401 
4402 /**
4403  * ice_ena_vsi_txq
4404  * @pi: port information structure
4405  * @vsi_handle: software VSI handle
4406  * @tc: TC number
4407  * @q_handle: software queue handle
4408  * @num_qgrps: Number of added queue groups
4409  * @buf: list of queue groups to be added
4410  * @buf_size: size of buffer for indirect command
4411  * @cd: pointer to command details structure or NULL
4412  *
4413  * This function adds one LAN queue
4414  */
4415 enum ice_status
4416 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4417 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4418 		struct ice_sq_cd *cd)
4419 {
4420 	struct ice_aqc_txsched_elem_data node = { 0 };
4421 	struct ice_sched_node *parent;
4422 	struct ice_q_ctx *q_ctx;
4423 	enum ice_status status;
4424 	struct ice_hw *hw;
4425 
4426 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4427 		return ICE_ERR_CFG;
4428 
4429 	if (num_qgrps > 1 || buf->num_txqs > 1)
4430 		return ICE_ERR_MAX_LIMIT;
4431 
4432 	hw = pi->hw;
4433 
4434 	if (!ice_is_vsi_valid(hw, vsi_handle))
4435 		return ICE_ERR_PARAM;
4436 
4437 	ice_acquire_lock(&pi->sched_lock);
4438 
4439 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4440 	if (!q_ctx) {
4441 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4442 			  q_handle);
4443 		status = ICE_ERR_PARAM;
4444 		goto ena_txq_exit;
4445 	}
4446 
4447 	/* find a parent node */
4448 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4449 					    ICE_SCHED_NODE_OWNER_LAN);
4450 	if (!parent) {
4451 		status = ICE_ERR_PARAM;
4452 		goto ena_txq_exit;
4453 	}
4454 
4455 	buf->parent_teid = parent->info.node_teid;
4456 	node.parent_teid = parent->info.node_teid;
4457 	/* Mark the values in the "generic" section as valid. The default
4458 	 * value in the "generic" section is zero. This means that:
4459 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4460 	 * - 0 priority among siblings, indicated by Bits 1-3.
4461 	 * - WFQ, indicated by Bit 4.
4462 	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4463 	 * Bits 5-6.
4464 	 * - Bit 7 is reserved.
4465 	 * Without setting the generic section as valid in valid_sections, the
4466 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4467 	 */
4468 	buf->txqs[0].info.valid_sections =
4469 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4470 		ICE_AQC_ELEM_VALID_EIR;
4471 	buf->txqs[0].info.generic = 0;
4472 	buf->txqs[0].info.cir_bw.bw_profile_idx =
4473 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4474 	buf->txqs[0].info.cir_bw.bw_alloc =
4475 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4476 	buf->txqs[0].info.eir_bw.bw_profile_idx =
4477 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4478 	buf->txqs[0].info.eir_bw.bw_alloc =
4479 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4480 
4481 	/* add the LAN queue */
4482 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4483 	if (status != ICE_SUCCESS) {
4484 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4485 			  LE16_TO_CPU(buf->txqs[0].txq_id),
4486 			  hw->adminq.sq_last_status);
4487 		goto ena_txq_exit;
4488 	}
4489 
4490 	node.node_teid = buf->txqs[0].q_teid;
4491 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4492 	q_ctx->q_handle = q_handle;
4493 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4494 
4495 	/* add a leaf node into scheduler tree queue layer */
4496 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4497 	if (!status)
4498 		status = ice_sched_replay_q_bw(pi, q_ctx);
4499 
4500 ena_txq_exit:
4501 	ice_release_lock(&pi->sched_lock);
4502 	return status;
4503 }
4504 
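/*
 * Example usage (a minimal sketch; the helper name is hypothetical):
 * enable a single Tx queue on TC 0. qg_buf must describe one group with
 * one queue whose context image has already been packed (for instance via
 * ice_set_ctx()).
 */
static enum ice_status
ice_example_ena_one_txq(struct ice_port_info *pi, u16 vsi_handle,
			u16 q_handle, struct ice_aqc_add_tx_qgrp *qg_buf)
{
	qg_buf->num_txqs = 1;

	return ice_ena_vsi_txq(pi, vsi_handle, 0, q_handle, 1, qg_buf,
			       ice_struct_size(qg_buf, txqs, 1), NULL);
}
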
4505 /**
4506  * ice_dis_vsi_txq
4507  * @pi: port information structure
4508  * @vsi_handle: software VSI handle
4509  * @tc: TC number
4510  * @num_queues: number of queues
4511  * @q_handles: pointer to software queue handle array
4512  * @q_ids: pointer to the q_id array
4513  * @q_teids: pointer to queue node teids
4514  * @rst_src: if called due to reset, specifies the reset source
4515  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4516  * @cd: pointer to command details structure or NULL
4517  *
4518  * This function removes queues and their corresponding nodes in SW DB
4519  */
4520 enum ice_status
4521 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4522 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
4523 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
4524 		struct ice_sq_cd *cd)
4525 {
4526 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4527 	struct ice_aqc_dis_txq_item *qg_list;
4528 	struct ice_q_ctx *q_ctx;
4529 	struct ice_hw *hw;
4530 	u16 i, buf_size;
4531 
4532 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4533 		return ICE_ERR_CFG;
4534 
4535 	hw = pi->hw;
4536 
4537 	if (!num_queues) {
4538 		/* if the queue is already disabled but the disable queue
4539 		 * command still has to be sent to complete the VF reset, then
4540 		 * call ice_aq_dis_lan_txq without any queue information
4541 		 */
4542 		if (rst_src)
4543 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4544 						  vmvf_num, NULL);
4545 		return ICE_ERR_CFG;
4546 	}
4547 
4548 	buf_size = ice_struct_size(qg_list, q_id, 1);
4549 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4550 	if (!qg_list)
4551 		return ICE_ERR_NO_MEMORY;
4552 
4553 	ice_acquire_lock(&pi->sched_lock);
4554 
4555 	for (i = 0; i < num_queues; i++) {
4556 		struct ice_sched_node *node;
4557 
4558 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4559 		if (!node)
4560 			continue;
4561 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4562 		if (!q_ctx) {
4563 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4564 				  q_handles[i]);
4565 			continue;
4566 		}
4567 		if (q_ctx->q_handle != q_handles[i]) {
4568 			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4569 				  q_ctx->q_handle, q_handles[i]);
4570 			continue;
4571 		}
4572 		qg_list->parent_teid = node->info.parent_teid;
4573 		qg_list->num_qs = 1;
4574 		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4575 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4576 					    vmvf_num, cd);
4577 
4578 		if (status != ICE_SUCCESS)
4579 			break;
4580 		ice_free_sched_node(pi, node);
4581 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4582 	}
4583 	ice_release_lock(&pi->sched_lock);
4584 	ice_free(hw, qg_list);
4585 	return status;
4586 }
4587 
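/*
 * Example usage (a minimal sketch; the helper name is hypothetical):
 * disable a single Tx queue on TC 0 outside of any reset flow, so
 * ICE_NO_RESET and a zero VM/VF number are passed.
 */
static enum ice_status
ice_example_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle,
			u16 q_handle, u16 q_id, u32 q_teid)
{
	return ice_dis_vsi_txq(pi, vsi_handle, 0, 1, &q_handle, &q_id,
			       &q_teid, ICE_NO_RESET, 0, NULL);
}
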
4588 /**
4589  * ice_cfg_vsi_qs - configure the new/existing VSI queues
4590  * @pi: port information structure
4591  * @vsi_handle: software VSI handle
4592  * @tc_bitmap: TC bitmap
4593  * @maxqs: max queues array per TC
4594  * @owner: LAN or RDMA
4595  *
4596  * This function adds/updates the VSI queues per TC.
4597  */
4598 static enum ice_status
4599 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4600 	       u16 *maxqs, u8 owner)
4601 {
4602 	enum ice_status status = ICE_SUCCESS;
4603 	u8 i;
4604 
4605 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4606 		return ICE_ERR_CFG;
4607 
4608 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4609 		return ICE_ERR_PARAM;
4610 
4611 	ice_acquire_lock(&pi->sched_lock);
4612 
4613 	ice_for_each_traffic_class(i) {
4614 		/* configuration is possible only if TC node is present */
4615 		if (!ice_sched_get_tc_node(pi, i))
4616 			continue;
4617 
4618 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4619 					   ice_is_tc_ena(tc_bitmap, i));
4620 		if (status)
4621 			break;
4622 	}
4623 
4624 	ice_release_lock(&pi->sched_lock);
4625 	return status;
4626 }
4627 
4628 /**
4629  * ice_cfg_vsi_lan - configure VSI LAN queues
4630  * @pi: port information structure
4631  * @vsi_handle: software VSI handle
4632  * @tc_bitmap: TC bitmap
4633  * @max_lanqs: max LAN queues array per TC
4634  *
4635  * This function adds/updates the VSI LAN queues per TC.
4636  */
4637 enum ice_status
4638 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4639 		u16 *max_lanqs)
4640 {
4641 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4642 			      ICE_SCHED_NODE_OWNER_LAN);
4643 }
4644 
4645 /**
4646  * ice_is_main_vsi - checks whether the VSI is main VSI
4647  * @hw: pointer to the HW struct
4648  * @vsi_handle: VSI handle
4649  *
4650  * Checks whether the VSI is the main VSI (the first PF VSI created on
4651  * the given PF).
4652  */
4653 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4654 {
4655 	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4656 }
4657 
4658 /**
4659  * ice_replay_pre_init - replay pre initialization
4660  * @hw: pointer to the HW struct
4661  * @sw: pointer to switch info struct for which function initializes filters
4662  *
4663  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4664  */
4665 static enum ice_status
4666 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4667 {
4668 	enum ice_status status;
4669 	u8 i;
4670 
4671 	/* Delete old entries from replay filter list head if there is any */
4672 	ice_rm_sw_replay_rule_info(hw, sw);
4673 	/* At the start of replay, move entries into the replay_rules list;
4674 	 * this allows adding rule entries back to the filt_rules list,
4675 	 * which is the operational list.
4676 	 */
4677 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4678 		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4679 				  &sw->recp_list[i].filt_replay_rules);
4680 	ice_sched_replay_agg_vsi_preinit(hw);
4681 
4682 	status = ice_sched_replay_root_node_bw(hw->port_info);
4683 	if (status)
4684 		return status;
4685 
4686 	return ice_sched_replay_tc_node_bw(hw->port_info);
4687 }
4688 
4689 /**
4690  * ice_replay_vsi - replay VSI configuration
4691  * @hw: pointer to the HW struct
4692  * @vsi_handle: driver VSI handle
4693  *
4694  * Restore all VSI configuration after reset. It is required to call this
4695  * function with main VSI first.
4696  */
4697 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4698 {
4699 	struct ice_switch_info *sw = hw->switch_info;
4700 	struct ice_port_info *pi = hw->port_info;
4701 	enum ice_status status;
4702 
4703 	if (!ice_is_vsi_valid(hw, vsi_handle))
4704 		return ICE_ERR_PARAM;
4705 
4706 	/* Replay pre-initialization if there is any */
4707 	if (ice_is_main_vsi(hw, vsi_handle)) {
4708 		status = ice_replay_pre_init(hw, sw);
4709 		if (status)
4710 			return status;
4711 	}
4712 	/* Replay per VSI all RSS configurations */
4713 	status = ice_replay_rss_cfg(hw, vsi_handle);
4714 	if (status)
4715 		return status;
4716 	/* Replay per VSI all filters */
4717 	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4718 	if (!status)
4719 		status = ice_replay_vsi_agg(hw, vsi_handle);
4720 	return status;
4721 }
4722 
4723 /**
4724  * ice_replay_post - post replay configuration cleanup
4725  * @hw: pointer to the HW struct
4726  *
4727  * Post replay cleanup.
4728  */
4729 void ice_replay_post(struct ice_hw *hw)
4730 {
4731 	/* Delete old entries from replay filter list head */
4732 	ice_rm_all_sw_replay_rule_info(hw);
4733 	ice_sched_replay_agg(hw);
4734 }
4735 
4736 /**
4737  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4738  * @hw: ptr to the hardware info
4739  * @reg: offset of 64 bit HW register to read from
4740  * @prev_stat_loaded: bool to specify if previous stats are loaded
4741  * @prev_stat: ptr to previous loaded stat value
4742  * @cur_stat: ptr to current stat value
4743  */
4744 void
4745 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4746 		  u64 *prev_stat, u64 *cur_stat)
4747 {
4748 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4749 
4750 	/* device stats are not reset at PFR, so they likely will not be zeroed
4751 	 * when the driver starts. Thus, save the value from the first read
4752 	 * without adding to the statistic value so that we report stats which
4753 	 * count up from zero.
4754 	 */
4755 	if (!prev_stat_loaded) {
4756 		*prev_stat = new_data;
4757 		return;
4758 	}
4759 
4760 	/* Calculate the difference between the new and old values, and then
4761 	 * add it to the software stat value.
4762 	 */
4763 	if (new_data >= *prev_stat)
4764 		*cur_stat += new_data - *prev_stat;
4765 	else
4766 		/* to manage the potential roll-over */
4767 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4768 
4769 	/* Update the previously stored value to prepare for next read */
4770 	*prev_stat = new_data;
4771 }
4772 
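/*
 * Worked example (a sketch of the roll-over arithmetic above): if
 * *prev_stat is 0xFFFFFFFFF0 and the next 40-bit read returns 0x10, then
 * new_data < *prev_stat, and the roll-over branch computes
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, crediting the 32 events
 * that occurred across the wrap to *cur_stat.
 */
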
4773 /**
4774  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4775  * @hw: ptr to the hardware info
4776  * @reg: offset of HW register to read from
4777  * @prev_stat_loaded: bool to specify if previous stats are loaded
4778  * @prev_stat: ptr to previous loaded stat value
4779  * @cur_stat: ptr to current stat value
4780  */
4781 void
4782 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4783 		  u64 *prev_stat, u64 *cur_stat)
4784 {
4785 	u32 new_data;
4786 
4787 	new_data = rd32(hw, reg);
4788 
4789 	/* device stats are not reset at PFR, so they likely will not be zeroed
4790 	 * when the driver starts. Thus, save the value from the first read
4791 	 * without adding to the statistic value so that we report stats which
4792 	 * count up from zero.
4793 	 */
4794 	if (!prev_stat_loaded) {
4795 		*prev_stat = new_data;
4796 		return;
4797 	}
4798 
4799 	/* Calculate the difference between the new and old values, and then
4800 	 * add it to the software stat value.
4801 	 */
4802 	if (new_data >= *prev_stat)
4803 		*cur_stat += new_data - *prev_stat;
4804 	else
4805 		/* to manage the potential roll-over */
4806 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4807 
4808 	/* Update the previously stored value to prepare for next read */
4809 	*prev_stat = new_data;
4810 }
4811 
4812 /**
4813  * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4814  * @hw: ptr to the hardware info
4815  * @vsi_handle: VSI handle
4816  * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4817  * @cur_stats: ptr to current stats structure
4818  *
4819  * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
4820  * thus cannot be read using the normal ice_stat_update32 function.
4821  *
4822  * Read the GLV_REPC register associated with the given VSI, and update the
4823  * rx_no_desc and rx_error values in the ice_eth_stats structure.
4824  *
4825  * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4826  * cleared each time it's read.
4827  *
4828  * Note that the GLV_RDPC register also counts the causes that would trigger
4829  * GLV_REPC. However, it does not give the finer grained detail about why the
4830  * packets are being dropped. The GLV_REPC values can be used to distinguish
4831  * whether Rx packets are dropped due to errors or due to no available
4832  * descriptors.
4833  */
4834 void
4835 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4836 		     struct ice_eth_stats *cur_stats)
4837 {
4838 	u16 vsi_num, no_desc, error_cnt;
4839 	u32 repc;
4840 
4841 	if (!ice_is_vsi_valid(hw, vsi_handle))
4842 		return;
4843 
4844 	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4845 
4846 	/* If we haven't loaded stats yet, just clear the current value */
4847 	if (!prev_stat_loaded) {
4848 		wr32(hw, GLV_REPC(vsi_num), 0);
4849 		return;
4850 	}
4851 
4852 	repc = rd32(hw, GLV_REPC(vsi_num));
4853 	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4854 	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4855 
4856 	/* Clear the count by writing to the stats register */
4857 	wr32(hw, GLV_REPC(vsi_num), 0);
4858 
4859 	cur_stats->rx_no_desc += no_desc;
4860 	cur_stats->rx_errors += error_cnt;
4861 }
4862 
4863 /**
4864  * ice_aq_alternate_write
4865  * @hw: pointer to the hardware structure
4866  * @reg_addr0: address of first dword to be written
4867  * @reg_val0: value to be written under 'reg_addr0'
4868  * @reg_addr1: address of second dword to be written
4869  * @reg_val1: value to be written under 'reg_addr1'
4870  *
4871  * Write one or two dwords to the alternate structure. Fields are indicated
4872  * by 'reg_addr0' and 'reg_addr1' register numbers.
4873  */
4874 enum ice_status
4875 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
4876 		       u32 reg_addr1, u32 reg_val1)
4877 {
4878 	struct ice_aqc_read_write_alt_direct *cmd;
4879 	struct ice_aq_desc desc;
4880 	enum ice_status status;
4881 
4882 	cmd = &desc.params.read_write_alt_direct;
4883 
4884 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
4885 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4886 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4887 	cmd->dword0_value = CPU_TO_LE32(reg_val0);
4888 	cmd->dword1_value = CPU_TO_LE32(reg_val1);
4889 
4890 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4891 
4892 	return status;
4893 }
4894 
4895 /**
4896  * ice_aq_alternate_read
4897  * @hw: pointer to the hardware structure
4898  * @reg_addr0: address of first dword to be read
4899  * @reg_val0: pointer for data read from 'reg_addr0'
4900  * @reg_addr1: address of second dword to be read
4901  * @reg_val1: pointer for data read from 'reg_addr1'
4902  *
4903  * Read one or two dwords from the alternate structure. Fields are
4904  * indicated by the 'reg_addr0' and 'reg_addr1' register numbers. If the
4905  * 'reg_val1' pointer is not passed, only the register at 'reg_addr0' is read.
4906  */
4907 enum ice_status
4908 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
4909 		      u32 reg_addr1, u32 *reg_val1)
4910 {
4911 	struct ice_aqc_read_write_alt_direct *cmd;
4912 	struct ice_aq_desc desc;
4913 	enum ice_status status;
4914 
4915 	cmd = &desc.params.read_write_alt_direct;
4916 
4917 	if (!reg_val0)
4918 		return ICE_ERR_PARAM;
4919 
4920 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
4921 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4922 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4923 
4924 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4925 
4926 	if (status == ICE_SUCCESS) {
4927 		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
4928 
4929 		if (reg_val1)
4930 			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
4931 	}
4932 
4933 	return status;
4934 }
4935 
4936 /**
4937  *  ice_aq_alternate_write_done
4938  *  @hw: pointer to the HW structure.
4939  *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
4940  *  @reset_needed: out parameter; true if SW should trigger a GLOBAL reset
4941  *
4942  *  Indicates to the FW that alternate structures have been changed.
4943  */
4944 enum ice_status
4945 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
4946 {
4947 	struct ice_aqc_done_alt_write *cmd;
4948 	struct ice_aq_desc desc;
4949 	enum ice_status status;
4950 
4951 	cmd = &desc.params.done_alt_write;
4952 
4953 	if (!reset_needed)
4954 		return ICE_ERR_PARAM;
4955 
4956 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
4957 	cmd->flags = bios_mode;
4958 
4959 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4960 	if (!status)
4961 		*reset_needed = (LE16_TO_CPU(cmd->flags) &
4962 				 ICE_AQC_RESP_RESET_NEEDED) != 0;
4963 
4964 	return status;
4965 }
4966 
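/*
 * Example usage (a minimal sketch; the helper name is hypothetical): write
 * two dwords to the alternate structure and then commit them, honoring the
 * reset indication. bios_mode is whatever mode flag the caller runs under.
 */
static enum ice_status
ice_example_alt_write_commit(struct ice_hw *hw, u32 addr0, u32 val0,
			     u32 addr1, u32 val1, u8 bios_mode)
{
	bool reset_needed = false;
	enum ice_status status;

	status = ice_aq_alternate_write(hw, addr0, val0, addr1, val1);
	if (status)
		return status;

	status = ice_aq_alternate_write_done(hw, bios_mode, &reset_needed);
	if (!status && reset_needed)
		ice_debug(hw, ICE_DBG_INIT, "alternate write needs a GLOBAL reset\n");

	return status;
}
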
4967 /**
4968  *  ice_aq_alternate_clear
4969  *  @hw: pointer to the HW structure.
4970  *
4971  *  Clear the alternate structures of the port from which the function
4972  *  is called.
4973  */
4974 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
4975 {
4976 	struct ice_aq_desc desc;
4977 	enum ice_status status;
4978 
4979 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
4980 
4981 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4982 
4983 	return status;
4984 }
4985 
4986 /**
4987  * ice_sched_query_elem - query element information from HW
4988  * @hw: pointer to the HW struct
4989  * @node_teid: node TEID to be queried
4990  * @buf: buffer to element information
4991  *
4992  * This function queries HW element information
4993  */
4994 enum ice_status
4995 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4996 		     struct ice_aqc_txsched_elem_data *buf)
4997 {
4998 	u16 buf_size, num_elem_ret = 0;
4999 	enum ice_status status;
5000 
5001 	buf_size = sizeof(*buf);
5002 	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5003 	buf->node_teid = CPU_TO_LE32(node_teid);
5004 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5005 					  NULL);
5006 	if (status != ICE_SUCCESS || num_elem_ret != 1)
5007 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5008 	return status;
5009 }
5010 
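/*
 * Usage sketch (illustrative only): querying the HW view of a single
 * scheduler node. The TEID below is a placeholder; callers normally
 * take it from a node in the scheduler tree.
 *
 *	struct ice_aqc_txsched_elem_data elem;
 *	u32 teid = 0; // CPU-order TEID of the node to inspect
 *
 *	if (ice_sched_query_elem(hw, teid, &elem) == ICE_SUCCESS) {
 *		// elem now holds the element data reported by HW
 *	}
 */
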
5011 /**
5012  * ice_get_fw_mode - returns FW mode
5013  * @hw: pointer to the HW struct
5014  */
5015 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5016 {
5017 #define ICE_FW_MODE_DBG_M BIT(0)
5018 #define ICE_FW_MODE_REC_M BIT(1)
5019 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5020 	u32 fw_mode;
5021 
5022 	/* check the current FW mode */
5023 	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5024 
5025 	if (fw_mode & ICE_FW_MODE_DBG_M)
5026 		return ICE_FW_MODE_DBG;
5027 	else if (fw_mode & ICE_FW_MODE_REC_M)
5028 		return ICE_FW_MODE_REC;
5029 	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5030 		return ICE_FW_MODE_ROLLBACK;
5031 	else
5032 		return ICE_FW_MODE_NORMAL;
5033 }
5034 
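/*
 * Usage sketch (illustrative only): branching on the FW mode during
 * init, e.g. to restrict features when FW is not in its normal mode.
 *
 *	switch (ice_get_fw_mode(hw)) {
 *	case ICE_FW_MODE_REC:
 *		// recovery mode: only a minimal init path is safe
 *		break;
 *	case ICE_FW_MODE_ROLLBACK:
 *		// rollback image booted: features may be limited
 *		break;
 *	default:
 *		break;
 *	}
 */
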
5035 /**
5036  * ice_get_cur_lldp_persist_status
5037  * @hw: pointer to the HW struct
5038  * @lldp_status: return value of LLDP persistence status
5039  *
5040  * Get the current LLDP persistence status from the NVM
5041  */
5042 enum ice_status
5043 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5044 {
5045 	struct ice_port_info *pi = hw->port_info;
5046 	enum ice_status ret;
5047 	__le32 raw_data;
5048 	u32 data, mask;
5049 
5050 	if (!lldp_status)
5051 		return ICE_ERR_BAD_PTR;
5052 
5053 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5054 	if (ret)
5055 		return ret;
5056 
5057 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
5058 			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
5059 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
5060 			      false, true, NULL);
5061 	if (!ret) {
5062 		data = LE32_TO_CPU(raw_data);
5063 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5064 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5065 		data = data & mask;
5066 		*lldp_status = data >>
5067 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5068 	}
5069 
5070 	ice_release_nvm(hw);
5071 
5072 	return ret;
5073 }
5074 
5075 /**
5076  * ice_get_dflt_lldp_persist_status
5077  * @hw: pointer to the HW struct
5078  * @lldp_status: return value of LLDP persistence status
5079  *
5080  * Get the default LLDP persistence status from the NVM
5081  */
5082 enum ice_status
5083 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5084 {
5085 	struct ice_port_info *pi = hw->port_info;
5086 	u32 data, mask, loc_data, loc_data_tmp;
5087 	enum ice_status ret;
5088 	__le16 loc_raw_data;
5089 	__le32 raw_data;
5090 
5091 	if (!lldp_status)
5092 		return ICE_ERR_BAD_PTR;
5093 
5094 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5095 	if (ret)
5096 		return ret;
5097 
5098 	/* Read the offset of EMP_SR_PTR */
5099 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5100 			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5101 			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5102 			      &loc_raw_data, false, true, NULL);
5103 	if (ret)
5104 		goto exit;
5105 
5106 	loc_data = LE16_TO_CPU(loc_raw_data);
5107 	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5108 		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5109 		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5110 	} else {
5111 		loc_data *= ICE_AQC_NVM_WORD_UNIT;
5112 	}
5113 
5114 	/* Read the offset of LLDP configuration pointer */
5115 	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5116 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5117 			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5118 			      false, true, NULL);
5119 	if (ret)
5120 		goto exit;
5121 
5122 	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5123 	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5124 	loc_data += loc_data_tmp;
5125 
5126 	/* We need to skip LLDP configuration section length (2 bytes) */
5127 	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5128 
5129 	/* Read the LLDP default configuration */
5130 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5131 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5132 			      true, NULL);
5133 	if (!ret) {
5134 		data = LE32_TO_CPU(raw_data);
5135 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5136 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5137 		data = data & mask;
5138 		*lldp_status = data >>
5139 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5140 	}
5141 
5142 exit:
5143 	ice_release_nvm(hw);
5144 
5145 	return ret;
5146 }
5147 
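/*
 * Usage sketch (illustrative only): comparing the current and default
 * LLDP persistence settings for this port. Both helpers acquire and
 * release the NVM semaphore internally, so no extra locking is needed.
 *
 *	u32 cur = 0, dflt = 0;
 *
 *	if (!ice_get_cur_lldp_persist_status(hw, &cur) &&
 *	    !ice_get_dflt_lldp_persist_status(hw, &dflt) &&
 *	    cur != dflt) {
 *		// persistent LLDP differs from the NVM default
 *	}
 */
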
5148 /**
5149  * ice_fw_supports_link_override
5150  * @hw: pointer to the hardware structure
5151  *
5152  * Checks if the firmware supports link override
5153  */
5154 bool ice_fw_supports_link_override(struct ice_hw *hw)
5155 {
5156 	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5157 		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5158 			return true;
5159 		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5160 		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5161 			return true;
5162 	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5163 		return true;
5164 	}
5165 
5166 	return false;
5167 }
5168 
5169 /**
5170  * ice_get_link_default_override
5171  * @ldo: pointer to the link default override struct
5172  * @pi: pointer to the port info struct
5173  *
5174  * Gets the link default override for a port
5175  */
5176 enum ice_status
5177 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5178 			      struct ice_port_info *pi)
5179 {
5180 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
5181 	struct ice_hw *hw = pi->hw;
5182 	enum ice_status status;
5183 
5184 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5185 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5186 	if (status) {
5187 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5188 		return status;
5189 	}
5190 
5191 	/* Each port has its own config; calculate for our port */
5192 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5193 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5194 
5195 	/* link options first */
5196 	status = ice_read_sr_word(hw, tlv_start, &buf);
5197 	if (status) {
5198 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5199 		return status;
5200 	}
5201 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5202 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5203 		ICE_LINK_OVERRIDE_PHY_CFG_S;
5204 
5205 	/* link PHY config */
5206 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5207 	status = ice_read_sr_word(hw, offset, &buf);
5208 	if (status) {
5209 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5210 		return status;
5211 	}
5212 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5213 
5214 	/* PHY types low */
5215 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5216 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5217 		status = ice_read_sr_word(hw, (offset + i), &buf);
5218 		if (status) {
5219 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
5220 			return status;
5221 		}
5222 		/* shift 16 bits at a time to fill 64 bits */
5223 		ldo->phy_type_low |= ((u64)buf << (i * 16));
5224 	}
5225 
5226 	/* PHY types high */
5227 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5228 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5229 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5230 		status = ice_read_sr_word(hw, (offset + i), &buf);
5231 		if (status) {
5232 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
5233 			return status;
5234 		}
5235 		/* shift 16 bits at a time to fill 64 bits */
5236 		ldo->phy_type_high |= ((u64)buf << (i * 16));
5237 	}
5238 
5239 	return status;
5240 }
5241 
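/*
 * Usage sketch (illustrative only): the support check above is meant to
 * gate ice_get_link_default_override(), since older FW does not provide
 * the override TLV in a form the driver can rely on.
 *
 *	struct ice_link_default_override_tlv ldo = { 0 };
 *
 *	if (ice_fw_supports_link_override(hw) &&
 *	    !ice_get_link_default_override(&ldo, hw->port_info)) {
 *		// ldo.options, ldo.fec_options and the phy_type fields
 *		// are now valid for this port
 *	}
 */
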
5242 /**
5243  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5244  * @caps: get PHY capability data
5245  */
5246 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5247 {
5248 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5249 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5250 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
5251 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
5252 		return true;
5253 
5254 	return false;
5255 }
5256 
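/*
 * Usage sketch (illustrative only): checking autoneg on the active link
 * configuration. The ICE_AQC_REPORT_ACTIVE_CFG report mode is assumed
 * from the adminq definitions; other report modes work the same way.
 *
 *	struct ice_aqc_get_phy_caps_data caps = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 &caps, NULL) &&
 *	    ice_is_phy_caps_an_enabled(&caps)) {
 *		// autonegotiation is enabled on this link
 *	}
 */
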
5257 /**
5258  * ice_is_fw_health_report_supported
5259  * @hw: pointer to the hardware structure
5260  *
5261  * Return true if firmware supports health status reports,
5262  * false otherwise
5263  */
5264 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
5265 {
5266 	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
5267 		return true;
5268 
5269 	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
5270 		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
5271 			return true;
5272 		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
5273 		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
5274 			return true;
5275 	}
5276 
5277 	return false;
5278 }
5279 
5280 /**
5281  * ice_aq_set_health_status_config - Configure FW health events
5282  * @hw: pointer to the HW struct
5283  * @event_source: type of diagnostic events to enable
5284  * @cd: pointer to command details structure or NULL
5285  *
5286  * Configure the health status event types that the firmware will send to this
5287  * PF. The supported event types are: PF-specific, all PFs, and global
5288  */
5289 enum ice_status
5290 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
5291 				struct ice_sq_cd *cd)
5292 {
5293 	struct ice_aqc_set_health_status_config *cmd;
5294 	struct ice_aq_desc desc;
5295 
5296 	cmd = &desc.params.set_health_status_config;
5297 
5298 	ice_fill_dflt_direct_cmd_desc(&desc,
5299 				      ice_aqc_opc_set_health_status_config);
5300 
5301 	cmd->event_source = event_source;
5302 
5303 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5304 }
5305 
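/*
 * Usage sketch (illustrative only): opting in to PF-specific FW health
 * events when the FW supports them. The event-source mask name below is
 * assumed from the adminq definitions.
 *
 *	if (ice_is_fw_health_report_supported(hw))
 *		ice_aq_set_health_status_config(hw,
 *		    ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK, NULL);
 */
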
5306 /**
5307  * ice_aq_get_port_options
5308  * @hw: pointer to the hw struct
5309  * @options: buffer for the resultant port options
5310  * @option_count: input - size of the buffer in port options structures,
5311  *                output - number of returned port options
5312  * @lport: logical port to call the command with (optional)
5313  * @lport_valid: when false, FW uses the port owned by the PF instead of
5314  *               lport; must be true when the PF owns more than one port
5315  * @active_option_idx: index of active port option in returned buffer
5316  * @active_option_valid: active option in returned buffer is valid
5317  *
5318  * Calls Get Port Options AQC (0x06ea) and verifies result.
5319  */
5320 enum ice_status
5321 ice_aq_get_port_options(struct ice_hw *hw,
5322 			struct ice_aqc_get_port_options_elem *options,
5323 			u8 *option_count, u8 lport, bool lport_valid,
5324 			u8 *active_option_idx, bool *active_option_valid)
5325 {
5326 	struct ice_aqc_get_port_options *cmd;
5327 	struct ice_aq_desc desc;
5328 	enum ice_status status;
5329 	u8 pmd_count;
5330 	u8 max_speed;
5331 	u8 i;
5332 
5333 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5334 
5335 	/* options buffer shall be able to hold max returned options */
5336 	/* options buffer must hold the maximum number of returned options */
5337 		return ICE_ERR_PARAM;
5338 
5339 	cmd = &desc.params.get_port_options;
5340 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
5341 
5342 	if (lport_valid)
5343 		cmd->lport_num = lport;
5344 	cmd->lport_num_valid = lport_valid;
5345 
5346 	status = ice_aq_send_cmd(hw, &desc, options,
5347 				 *option_count * sizeof(*options), NULL);
5348 	if (status != ICE_SUCCESS)
5349 		return status;
5350 
5351 	/* verify direct FW response & set output parameters */
5352 	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
5353 	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
5354 	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
5355 	if (*active_option_valid) {
5356 		*active_option_idx = cmd->port_options &
5357 				     ICE_AQC_PORT_OPT_ACTIVE_M;
5358 		if (*active_option_idx > (*option_count - 1))
5359 			return ICE_ERR_OUT_OF_RANGE;
5360 		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
5361 			  *active_option_idx);
5362 	}
5363 
5364 	/* verify indirect FW response & mask output options fields */
5365 	for (i = 0; i < *option_count; i++) {
5366 		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
5367 		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
5368 		pmd_count = options[i].pmd;
5369 		max_speed = options[i].max_lane_speed;
5370 		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
5371 			  pmd_count, max_speed);
5372 
5373 		/* check only entries containing valid max pmd speed values;
5374 		 * other reserved values may be returned when the logical port
5375 		 * used is unrelated to a specific option
5376 		 */
5377 		if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) {
5378 			if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV)
5379 				return ICE_ERR_OUT_OF_RANGE;
5380 			if (pmd_count > 2 &&
5381 			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G)
5382 				return ICE_ERR_CFG;
5383 			if (pmd_count > 7 &&
5384 			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G)
5385 				return ICE_ERR_CFG;
5386 		}
5387 	}
5388 
5389 	return ICE_SUCCESS;
5390 }
5391 
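/*
 * Usage sketch (illustrative only): querying the port options and
 * locating the active one. 'option_count' is in/out: buffer capacity on
 * input, number of valid entries on output. Sizing the buffer by
 * ICE_AQC_PORT_OPT_COUNT_M satisfies the function's minimum-size check.
 *
 *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_COUNT_M];
 *	u8 count = ICE_AQC_PORT_OPT_COUNT_M;
 *	u8 active = 0;
 *	bool active_valid = false;
 *
 *	if (!ice_aq_get_port_options(hw, opts, &count, 0, false,
 *				     &active, &active_valid) &&
 *	    active_valid) {
 *		// opts[active] describes the active port option
 *	}
 */
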
5392 /**
5393  * ice_aq_set_lldp_mib - Set the LLDP MIB
5394  * @hw: pointer to the HW struct
5395  * @mib_type: Local, Remote or both Local and Remote MIBs
5396  * @buf: pointer to the caller-supplied buffer to store the MIB block
5397  * @buf_size: size of the buffer (in bytes)
5398  * @cd: pointer to command details structure or NULL
5399  *
5400  * Set the LLDP MIB. (0x0A08)
5401  */
5402 enum ice_status
5403 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5404 		    struct ice_sq_cd *cd)
5405 {
5406 	struct ice_aqc_lldp_set_local_mib *cmd;
5407 	struct ice_aq_desc desc;
5408 
5409 	cmd = &desc.params.lldp_set_mib;
5410 
5411 	if (buf_size == 0 || !buf)
5412 		return ICE_ERR_PARAM;
5413 
5414 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5415 
5416 	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5417 	desc.datalen = CPU_TO_LE16(buf_size);
5418 
5419 	cmd->type = mib_type;
5420 	cmd->length = CPU_TO_LE16(buf_size);
5421 
5422 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5423 }
5424 
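/*
 * Usage sketch (illustrative only): pushing a caller-built local MIB to
 * the FW. The TLV layout of the buffer and the mib_type value are the
 * caller's responsibility; 0 is used here as a placeholder type.
 *
 *	enum ice_status status;
 *	u8 mib_buf[64]; // caller-formatted LLDP TLVs
 *	u16 len = sizeof(mib_buf);
 *
 *	status = ice_aq_set_lldp_mib(hw, 0, mib_buf, len, NULL);
 */
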
5425 /**
5426  * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
5427  * @hw: pointer to HW struct
5428  */
5429 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5430 {
5431 	if (hw->mac_type != ICE_MAC_E810)
5432 		return false;
5433 
5434 	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5435 		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5436 			return true;
5437 		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5438 		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5439 			return true;
5440 	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5441 		return true;
5442 	}
5443 	return false;
5444 }
5445 
5446 /**
5447  * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
5448  * @hw: pointer to HW struct
5449  * @vsi_num: absolute HW index for VSI
5450  * @add: true to add the filter, false to remove it
5451  */
5452 enum ice_status
5453 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5454 {
5455 	struct ice_aqc_lldp_filter_ctrl *cmd;
5456 	struct ice_aq_desc desc;
5457 
5458 	cmd = &desc.params.lldp_filter_ctrl;
5459 
5460 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5461 
5462 	if (add)
5463 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5464 	else
5465 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5466 
5467 	cmd->vsi_num = CPU_TO_LE16(vsi_num);
5468 
5469 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5470 }
5471 
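/*
 * Usage sketch (illustrative only): redirecting LLDP frames to a VSI
 * only when the FW supports the filter-control command; per the check
 * above, that is currently limited to E810 MACs.
 *
 *	u16 vsi_num = 0; // placeholder absolute VSI index
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		ice_lldp_fltr_add_remove(hw, vsi_num, true);
 */
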
5472 /**
5473  * ice_fw_supports_report_dflt_cfg
5474  * @hw: pointer to the hardware structure
5475  *
5476  * Checks if the firmware supports report default configuration
5477  */
5478 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
5479 {
5480 	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5481 		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
5482 			return true;
5483 		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
5484 		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
5485 			return true;
5486 	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5487 		return true;
5488 	}
5489 	return false;
5490 }
5491
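/*
 * Usage sketch (illustrative only): callers typically use this check to
 * pick the Get PHY Capabilities report mode. The report-mode macro
 * names below are assumed from the adminq definitions.
 *
 *	u8 report_mode = ice_fw_supports_report_dflt_cfg(hw) ?
 *	    ICE_AQC_REPORT_DFLT_CFG : ICE_AQC_REPORT_TOPO_CAP_MEDIA;
 */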