xref: /linux/drivers/net/ethernet/intel/ice/ice_common.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 #include "ice_flow.h"
8 
9 #define ICE_PF_RESET_WAIT_COUNT	300
10 
11 /**
12  * ice_set_mac_type - Sets MAC type
13  * @hw: pointer to the HW structure
14  *
15  * This function sets the MAC type of the adapter based on the
16  * vendor ID and device ID stored in the HW structure.
17  */
18 static int ice_set_mac_type(struct ice_hw *hw)
19 {
20 	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 		return -ENODEV;
22 
23 	switch (hw->device_id) {
24 	case ICE_DEV_ID_E810C_BACKPLANE:
25 	case ICE_DEV_ID_E810C_QSFP:
26 	case ICE_DEV_ID_E810C_SFP:
27 	case ICE_DEV_ID_E810_XXV_BACKPLANE:
28 	case ICE_DEV_ID_E810_XXV_QSFP:
29 	case ICE_DEV_ID_E810_XXV_SFP:
30 		hw->mac_type = ICE_MAC_E810;
31 		break;
32 	case ICE_DEV_ID_E823C_10G_BASE_T:
33 	case ICE_DEV_ID_E823C_BACKPLANE:
34 	case ICE_DEV_ID_E823C_QSFP:
35 	case ICE_DEV_ID_E823C_SFP:
36 	case ICE_DEV_ID_E823C_SGMII:
37 	case ICE_DEV_ID_E822C_10G_BASE_T:
38 	case ICE_DEV_ID_E822C_BACKPLANE:
39 	case ICE_DEV_ID_E822C_QSFP:
40 	case ICE_DEV_ID_E822C_SFP:
41 	case ICE_DEV_ID_E822C_SGMII:
42 	case ICE_DEV_ID_E822L_10G_BASE_T:
43 	case ICE_DEV_ID_E822L_BACKPLANE:
44 	case ICE_DEV_ID_E822L_SFP:
45 	case ICE_DEV_ID_E822L_SGMII:
46 	case ICE_DEV_ID_E823L_10G_BASE_T:
47 	case ICE_DEV_ID_E823L_1GBE:
48 	case ICE_DEV_ID_E823L_BACKPLANE:
49 	case ICE_DEV_ID_E823L_QSFP:
50 	case ICE_DEV_ID_E823L_SFP:
51 		hw->mac_type = ICE_MAC_GENERIC;
52 		break;
53 	default:
54 		hw->mac_type = ICE_MAC_UNKNOWN;
55 		break;
56 	}
57 
58 	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
59 	return 0;
60 }
61 
62 /**
63  * ice_is_e810
64  * @hw: pointer to the hardware structure
65  *
66  * returns true if the device is E810 based, false if not.
67  */
68 bool ice_is_e810(struct ice_hw *hw)
69 {
70 	return hw->mac_type == ICE_MAC_E810;
71 }
72 
73 /**
74  * ice_is_e810t
75  * @hw: pointer to the hardware structure
76  *
77  * returns true if the device is E810T based, false if not.
78  */
79 bool ice_is_e810t(struct ice_hw *hw)
80 {
81 	switch (hw->device_id) {
82 	case ICE_DEV_ID_E810C_SFP:
83 		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
84 		    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
85 			return true;
86 		break;
87 	default:
88 		break;
89 	}
90 
91 	return false;
92 }
93 
94 /**
95  * ice_clear_pf_cfg - Clear PF configuration
96  * @hw: pointer to the hardware structure
97  *
98  * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
99  * configuration, flow director filters, etc.).
100  */
101 int ice_clear_pf_cfg(struct ice_hw *hw)
102 {
103 	struct ice_aq_desc desc;
104 
105 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
106 
107 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
108 }
109 
110 /**
111  * ice_aq_manage_mac_read - manage MAC address read command
112  * @hw: pointer to the HW struct
113  * @buf: a virtual buffer to hold the manage MAC read response
114  * @buf_size: Size of the virtual buffer
115  * @cd: pointer to command details structure or NULL
116  *
117  * This function is used to return per PF station MAC address (0x0107).
118  * NOTE: Upon successful completion of this command, MAC address information
119  * is returned in the user specified buffer, which should be interpreted as a
120  * "manage_mac_read" response.
121  * The returned addresses (such as the LAN address) are also stored in the HW
122  * struct (port.mac). ice_discover_dev_caps is expected to be called before
123  * this function.
124  */
125 static int
126 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
127 		       struct ice_sq_cd *cd)
128 {
129 	struct ice_aqc_manage_mac_read_resp *resp;
130 	struct ice_aqc_manage_mac_read *cmd;
131 	struct ice_aq_desc desc;
132 	int status;
133 	u16 flags;
134 	u8 i;
135 
136 	cmd = &desc.params.mac_read;
137 
138 	if (buf_size < sizeof(*resp))
139 		return -EINVAL;
140 
141 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
142 
143 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
144 	if (status)
145 		return status;
146 
147 	resp = buf;
148 	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
149 
150 	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
151 		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
152 		return -EIO;
153 	}
154 
155 	/* A single port can report up to two (LAN and WoL) addresses */
156 	for (i = 0; i < cmd->num_addr; i++)
157 		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
158 			ether_addr_copy(hw->port_info->mac.lan_addr,
159 					resp[i].mac_addr);
160 			ether_addr_copy(hw->port_info->mac.perm_addr,
161 					resp[i].mac_addr);
162 			break;
163 		}
164 
165 	return 0;
166 }
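
/* Illustrative usage sketch (not part of the original file): callers size the
 * buffer for up to two ice_aqc_manage_mac_read_resp entries (LAN and WoL),
 * mirroring the call made later in ice_init_hw().
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *				 sizeof(struct ice_aqc_manage_mac_read_resp),
 *				 GFP_KERNEL);
 *
 *	if (buf) {
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 *		devm_kfree(ice_hw_to_dev(hw), buf);
 *	}
 */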
167 
168 /**
169  * ice_aq_get_phy_caps - returns PHY capabilities
170  * @pi: port information structure
171  * @qual_mods: report qualified modules
172  * @report_mode: report mode capabilities
173  * @pcaps: structure for PHY capabilities to be filled
174  * @cd: pointer to command details structure or NULL
175  *
176  * Returns the various PHY capabilities supported on the Port (0x0600)
177  */
178 int
179 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
180 		    struct ice_aqc_get_phy_caps_data *pcaps,
181 		    struct ice_sq_cd *cd)
182 {
183 	struct ice_aqc_get_phy_caps *cmd;
184 	u16 pcaps_size = sizeof(*pcaps);
185 	struct ice_aq_desc desc;
186 	struct ice_hw *hw;
187 	int status;
188 
189 	cmd = &desc.params.get_phy;
190 
191 	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
192 		return -EINVAL;
193 	hw = pi->hw;
194 
195 	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
196 	    !ice_fw_supports_report_dflt_cfg(hw))
197 		return -EINVAL;
198 
199 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
200 
201 	if (qual_mods)
202 		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
203 
204 	cmd->param0 |= cpu_to_le16(report_mode);
205 	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
206 
207 	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
208 		  report_mode);
209 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
210 		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
211 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
212 		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
213 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
214 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
215 		  pcaps->low_power_ctrl_an);
216 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
217 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
218 		  pcaps->eeer_value);
219 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
220 		  pcaps->link_fec_options);
221 	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
222 		  pcaps->module_compliance_enforcement);
223 	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
224 		  pcaps->extended_compliance_code);
225 	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
226 		  pcaps->module_type[0]);
227 	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
228 		  pcaps->module_type[1]);
229 	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
230 		  pcaps->module_type[2]);
231 
232 	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
233 		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
234 		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
235 		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
236 		       sizeof(pi->phy.link_info.module_type));
237 	}
238 
239 	return status;
240 }
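
/* Illustrative usage sketch (not part of the original file): query the PHY
 * capabilities of the currently attached media, as ice_init_hw() does further
 * below.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int status;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps) {
 *		status = ice_aq_get_phy_caps(hw->port_info, false,
 *					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *					     pcaps, NULL);
 *		devm_kfree(ice_hw_to_dev(hw), pcaps);
 *	}
 */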
241 
242 /**
243  * ice_aq_get_link_topo_handle - get link topology node return status
244  * @pi: port information structure
245  * @node_type: requested node type
246  * @cd: pointer to command details structure or NULL
247  *
248  * Get link topology node return status for specified node type (0x06E0)
249  *
250  * Node type cage can be used to determine if cage is present. If AQC
251  * returns error (ENOENT), then no cage present. If no cage present, then
252  * connection type is backplane or BASE-T.
253  */
254 static int
255 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
256 			    struct ice_sq_cd *cd)
257 {
258 	struct ice_aqc_get_link_topo *cmd;
259 	struct ice_aq_desc desc;
260 
261 	cmd = &desc.params.get_link_topo;
262 
263 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
264 
265 	cmd->addr.topo_params.node_type_ctx =
266 		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
267 		 ICE_AQC_LINK_TOPO_NODE_CTX_S);
268 
269 	/* set node type */
270 	cmd->addr.topo_params.node_type_ctx |=
271 		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
272 
273 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
274 }
275 
276 /**
277  * ice_is_media_cage_present
278  * @pi: port information structure
279  *
280  * Returns true if media cage is present, else false. If no cage, then
281  * media type is backplane or BASE-T.
282  */
283 static bool ice_is_media_cage_present(struct ice_port_info *pi)
284 {
285 	/* Node type cage can be used to determine if cage is present. If AQC
286 	 * returns error (ENOENT), then no cage present. If no cage present then
287 	 * connection type is backplane or BASE-T.
288 	 */
289 	return !ice_aq_get_link_topo_handle(pi,
290 					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
291 					    NULL);
292 }
293 
294 /**
295  * ice_get_media_type - Gets media type
296  * @pi: port information structure
297  */
298 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
299 {
300 	struct ice_link_status *hw_link_info;
301 
302 	if (!pi)
303 		return ICE_MEDIA_UNKNOWN;
304 
305 	hw_link_info = &pi->phy.link_info;
306 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
307 		/* If more than one media type is selected, report unknown */
308 		return ICE_MEDIA_UNKNOWN;
309 
310 	if (hw_link_info->phy_type_low) {
311 		/* 1G SGMII is a special case where some DA cable PHYs
312 		 * may show this as an option when it really shouldn't
313 		 * be since SGMII is meant to be between a MAC and a PHY
314 		 * in a backplane. Try to detect this case and handle it
315 		 */
316 		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
317 		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
318 		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
319 		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
320 		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
321 			return ICE_MEDIA_DA;
322 
323 		switch (hw_link_info->phy_type_low) {
324 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
325 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
326 		case ICE_PHY_TYPE_LOW_10GBASE_SR:
327 		case ICE_PHY_TYPE_LOW_10GBASE_LR:
328 		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
329 		case ICE_PHY_TYPE_LOW_25GBASE_SR:
330 		case ICE_PHY_TYPE_LOW_25GBASE_LR:
331 		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
332 		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
333 		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
334 		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
335 		case ICE_PHY_TYPE_LOW_50GBASE_SR:
336 		case ICE_PHY_TYPE_LOW_50GBASE_FR:
337 		case ICE_PHY_TYPE_LOW_50GBASE_LR:
338 		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
339 		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
340 		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
341 		case ICE_PHY_TYPE_LOW_100GBASE_DR:
342 		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
343 		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
344 		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
345 		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
346 		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
347 		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
348 		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
349 		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
350 			return ICE_MEDIA_FIBER;
351 		case ICE_PHY_TYPE_LOW_100BASE_TX:
352 		case ICE_PHY_TYPE_LOW_1000BASE_T:
353 		case ICE_PHY_TYPE_LOW_2500BASE_T:
354 		case ICE_PHY_TYPE_LOW_5GBASE_T:
355 		case ICE_PHY_TYPE_LOW_10GBASE_T:
356 		case ICE_PHY_TYPE_LOW_25GBASE_T:
357 			return ICE_MEDIA_BASET;
358 		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
359 		case ICE_PHY_TYPE_LOW_25GBASE_CR:
360 		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
361 		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
362 		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
363 		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
364 		case ICE_PHY_TYPE_LOW_50GBASE_CP:
365 		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
366 		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
367 		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
368 			return ICE_MEDIA_DA;
369 		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
370 		case ICE_PHY_TYPE_LOW_40G_XLAUI:
371 		case ICE_PHY_TYPE_LOW_50G_LAUI2:
372 		case ICE_PHY_TYPE_LOW_50G_AUI2:
373 		case ICE_PHY_TYPE_LOW_50G_AUI1:
374 		case ICE_PHY_TYPE_LOW_100G_AUI4:
375 		case ICE_PHY_TYPE_LOW_100G_CAUI4:
376 			if (ice_is_media_cage_present(pi))
377 				return ICE_MEDIA_DA;
378 			fallthrough;
379 		case ICE_PHY_TYPE_LOW_1000BASE_KX:
380 		case ICE_PHY_TYPE_LOW_2500BASE_KX:
381 		case ICE_PHY_TYPE_LOW_2500BASE_X:
382 		case ICE_PHY_TYPE_LOW_5GBASE_KR:
383 		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
384 		case ICE_PHY_TYPE_LOW_25GBASE_KR:
385 		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
386 		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
387 		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
388 		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
389 		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
390 		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
391 		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
392 			return ICE_MEDIA_BACKPLANE;
393 		}
394 	} else {
395 		switch (hw_link_info->phy_type_high) {
396 		case ICE_PHY_TYPE_HIGH_100G_AUI2:
397 		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
398 			if (ice_is_media_cage_present(pi))
399 				return ICE_MEDIA_DA;
400 			fallthrough;
401 		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
402 			return ICE_MEDIA_BACKPLANE;
403 		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
404 		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
405 			return ICE_MEDIA_FIBER;
406 		}
407 	}
408 	return ICE_MEDIA_UNKNOWN;
409 }
410 
411 /**
412  * ice_aq_get_link_info
413  * @pi: port information structure
414  * @ena_lse: enable/disable LinkStatusEvent reporting
415  * @link: pointer to link status structure - optional
416  * @cd: pointer to command details structure or NULL
417  *
418  * Get Link Status (0x0607). Returns the link status of the adapter.
419  */
420 int
421 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
422 		     struct ice_link_status *link, struct ice_sq_cd *cd)
423 {
424 	struct ice_aqc_get_link_status_data link_data = { 0 };
425 	struct ice_aqc_get_link_status *resp;
426 	struct ice_link_status *li_old, *li;
427 	enum ice_media_type *hw_media_type;
428 	struct ice_fc_info *hw_fc_info;
429 	bool tx_pause, rx_pause;
430 	struct ice_aq_desc desc;
431 	struct ice_hw *hw;
432 	u16 cmd_flags;
433 	int status;
434 
435 	if (!pi)
436 		return -EINVAL;
437 	hw = pi->hw;
438 	li_old = &pi->phy.link_info_old;
439 	hw_media_type = &pi->phy.media_type;
440 	li = &pi->phy.link_info;
441 	hw_fc_info = &pi->fc;
442 
443 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
444 	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
445 	resp = &desc.params.get_link_status;
446 	resp->cmd_flags = cpu_to_le16(cmd_flags);
447 	resp->lport_num = pi->lport;
448 
449 	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
450 
451 	if (status)
452 		return status;
453 
454 	/* save off old link status information */
455 	*li_old = *li;
456 
457 	/* update current link status information */
458 	li->link_speed = le16_to_cpu(link_data.link_speed);
459 	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
460 	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
461 	*hw_media_type = ice_get_media_type(pi);
462 	li->link_info = link_data.link_info;
463 	li->link_cfg_err = link_data.link_cfg_err;
464 	li->an_info = link_data.an_info;
465 	li->ext_info = link_data.ext_info;
466 	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
467 	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
468 	li->topo_media_conflict = link_data.topo_media_conflict;
469 	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
470 				      ICE_AQ_CFG_PACING_TYPE_M);
471 
472 	/* update fc info */
473 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
474 	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
475 	if (tx_pause && rx_pause)
476 		hw_fc_info->current_mode = ICE_FC_FULL;
477 	else if (tx_pause)
478 		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
479 	else if (rx_pause)
480 		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
481 	else
482 		hw_fc_info->current_mode = ICE_FC_NONE;
483 
484 	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
485 
486 	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
487 	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
488 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
489 		  (unsigned long long)li->phy_type_low);
490 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
491 		  (unsigned long long)li->phy_type_high);
492 	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
493 	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
494 	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
495 	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
496 	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
497 	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
498 	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
499 	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
500 		  li->max_frame_size);
501 	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);
502 
503 	/* save link status information */
504 	if (link)
505 		*link = *li;
506 
507 	/* flag cleared so calling functions don't call AQ again */
508 	pi->phy.get_link_info = false;
509 
510 	return 0;
511 }
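
/* Illustrative usage sketch (not part of the original file): refresh the
 * cached link status and test the link-up bit. ICE_AQ_LINK_UP is assumed to
 * be the link_info bit defined in ice_adminq_cmd.h.
 *
 *	struct ice_link_status link;
 *	bool link_up = false;
 *
 *	if (!ice_aq_get_link_info(hw->port_info, false, &link, NULL))
 *		link_up = !!(link.link_info & ICE_AQ_LINK_UP);
 */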
512 
513 /**
514  * ice_fill_tx_timer_and_fc_thresh
515  * @hw: pointer to the HW struct
516  * @cmd: pointer to MAC cfg structure
517  *
518  * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
519  * descriptor
520  */
521 static void
522 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
523 				struct ice_aqc_set_mac_cfg *cmd)
524 {
525 	u16 fc_thres_val, tx_timer_val;
526 	u32 val;
527 
528 	/* We read back the transmit timer and FC threshold value of
529 	 * LFC. Thus, we will use index =
530 	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
531 	 *
532 	 * Also, because we are operating on transmit timer and FC
533 	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
534 	 */
535 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
536 
537 	/* Retrieve the transmit timer */
538 	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
539 	tx_timer_val = val &
540 		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
541 	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
542 
543 	/* Retrieve the FC threshold */
544 	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
545 	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
546 
547 	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
548 }
549 
550 /**
551  * ice_aq_set_mac_cfg
552  * @hw: pointer to the HW struct
553  * @max_frame_size: Maximum Frame Size to be supported
554  * @cd: pointer to command details structure or NULL
555  *
556  * Set MAC configuration (0x0603)
557  */
558 int
559 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
560 {
561 	struct ice_aqc_set_mac_cfg *cmd;
562 	struct ice_aq_desc desc;
563 
564 	cmd = &desc.params.set_mac_cfg;
565 
566 	if (max_frame_size == 0)
567 		return -EINVAL;
568 
569 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
570 
571 	cmd->max_frame_size = cpu_to_le16(max_frame_size);
572 
573 	ice_fill_tx_timer_and_fc_thresh(hw, cmd);
574 
575 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
576 }
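
/* Illustrative usage sketch (not part of the original file): enable jumbo
 * frame support at the MAC level, as done during ice_init_hw() below.
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 *	if (status)
 *		ice_debug(hw, ICE_DBG_INIT, "Failed to set MAC config\n");
 */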
577 
578 /**
579  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
580  * @hw: pointer to the HW struct
581  */
582 static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
583 {
584 	struct ice_switch_info *sw;
585 	int status;
586 
587 	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
588 				       sizeof(*hw->switch_info), GFP_KERNEL);
589 	sw = hw->switch_info;
590 
591 	if (!sw)
592 		return -ENOMEM;
593 
594 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
595 	sw->prof_res_bm_init = 0;
596 
597 	status = ice_init_def_sw_recp(hw);
598 	if (status) {
599 		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
600 		return status;
601 	}
602 	return 0;
603 }
604 
605 /**
606  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
607  * @hw: pointer to the HW struct
608  */
609 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
610 {
611 	struct ice_switch_info *sw = hw->switch_info;
612 	struct ice_vsi_list_map_info *v_pos_map;
613 	struct ice_vsi_list_map_info *v_tmp_map;
614 	struct ice_sw_recipe *recps;
615 	u8 i;
616 
617 	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
618 				 list_entry) {
619 		list_del(&v_pos_map->list_entry);
620 		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
621 	}
622 	recps = sw->recp_list;
623 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
624 		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
625 
626 		recps[i].root_rid = i;
627 		list_for_each_entry_safe(rg_entry, tmprg_entry,
628 					 &recps[i].rg_list, l_entry) {
629 			list_del(&rg_entry->l_entry);
630 			devm_kfree(ice_hw_to_dev(hw), rg_entry);
631 		}
632 
633 		if (recps[i].adv_rule) {
634 			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
635 			struct ice_adv_fltr_mgmt_list_entry *lst_itr;
636 
637 			mutex_destroy(&recps[i].filt_rule_lock);
638 			list_for_each_entry_safe(lst_itr, tmp_entry,
639 						 &recps[i].filt_rules,
640 						 list_entry) {
641 				list_del(&lst_itr->list_entry);
642 				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
643 				devm_kfree(ice_hw_to_dev(hw), lst_itr);
644 			}
645 		} else {
646 			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
647 
648 			mutex_destroy(&recps[i].filt_rule_lock);
649 			list_for_each_entry_safe(lst_itr, tmp_entry,
650 						 &recps[i].filt_rules,
651 						 list_entry) {
652 				list_del(&lst_itr->list_entry);
653 				devm_kfree(ice_hw_to_dev(hw), lst_itr);
654 			}
655 		}
656 		if (recps[i].root_buf)
657 			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
658 	}
659 	ice_rm_all_sw_replay_rule_info(hw);
660 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
661 	devm_kfree(ice_hw_to_dev(hw), sw);
662 }
663 
664 /**
665  * ice_get_fw_log_cfg - get FW logging configuration
666  * @hw: pointer to the HW struct
667  */
668 static int ice_get_fw_log_cfg(struct ice_hw *hw)
669 {
670 	struct ice_aq_desc desc;
671 	__le16 *config;
672 	int status;
673 	u16 size;
674 
675 	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
676 	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
677 	if (!config)
678 		return -ENOMEM;
679 
680 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
681 
682 	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
683 	if (!status) {
684 		u16 i;
685 
686 		/* Save FW logging information into the HW structure */
687 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
688 			u16 v, m, flgs;
689 
690 			v = le16_to_cpu(config[i]);
691 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
692 			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
693 
694 			if (m < ICE_AQC_FW_LOG_ID_MAX)
695 				hw->fw_log.evnts[m].cur = flgs;
696 		}
697 	}
698 
699 	devm_kfree(ice_hw_to_dev(hw), config);
700 
701 	return status;
702 }
703 
704 /**
705  * ice_cfg_fw_log - configure FW logging
706  * @hw: pointer to the HW struct
707  * @enable: enable certain FW logging events if true, disable all if false
708  *
709  * This function enables/disables the FW logging via Rx CQ events and a UART
710  * port based on predetermined configurations. FW logging via the Rx CQ can be
711  * enabled/disabled for individual PFs. However, FW logging via the UART can
712  * only be enabled/disabled for all PFs on the same device.
713  *
714  * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
715  * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
716  * before initializing the device.
717  *
718  * When re/configuring FW logging, callers need to update the "cfg" elements of
719  * the hw->fw_log.evnts array with the desired logging event configurations for
720  * modules of interest. When disabling FW logging completely, the callers can
721  * just pass false in the "enable" parameter. On completion, the function will
722  * update the "cur" element of the hw->fw_log.evnts array with the resulting
723  * logging event configurations of the modules that are being re/configured. FW
724  * logging modules that are not part of a reconfiguration operation retain their
725  * previous states.
726  *
727  * Before resetting the device, it is recommended that the driver disables FW
728  * logging before shutting down the control queue. When disabling FW logging
729  * ("enable" = false), the latest configurations of FW logging events stored in
730  * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
731  * a device reset.
732  *
733  * When enabling FW logging to emit log messages via the Rx CQ during the
734  * device's initialization phase, a mechanism alternative to interrupt handlers
735  * needs to be used to extract FW log messages from the Rx CQ periodically and
736  * to prevent the Rx CQ from being full and stalling other types of control
737  * messages from FW to SW. Interrupts are typically disabled during the device's
738  * initialization phase.
739  */
740 static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
741 {
742 	struct ice_aqc_fw_logging *cmd;
743 	u16 i, chgs = 0, len = 0;
744 	struct ice_aq_desc desc;
745 	__le16 *data = NULL;
746 	u8 actv_evnts = 0;
747 	void *buf = NULL;
748 	int status = 0;
749 
750 	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
751 		return 0;
752 
753 	/* Disable FW logging only when the control queue is still responsive */
754 	if (!enable &&
755 	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
756 		return 0;
757 
758 	/* Get current FW log settings */
759 	status = ice_get_fw_log_cfg(hw);
760 	if (status)
761 		return status;
762 
763 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
764 	cmd = &desc.params.fw_logging;
765 
766 	/* Indicate which controls are valid */
767 	if (hw->fw_log.cq_en)
768 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
769 
770 	if (hw->fw_log.uart_en)
771 		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
772 
773 	if (enable) {
774 		/* Fill in an array of entries with FW logging modules and
775 		 * logging events being reconfigured.
776 		 */
777 		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
778 			u16 val;
779 
780 			/* Keep track of enabled event types */
781 			actv_evnts |= hw->fw_log.evnts[i].cfg;
782 
783 			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
784 				continue;
785 
786 			if (!data) {
787 				data = devm_kcalloc(ice_hw_to_dev(hw),
788 						    ICE_AQC_FW_LOG_ID_MAX,
789 						    sizeof(*data),
790 						    GFP_KERNEL);
791 				if (!data)
792 					return -ENOMEM;
793 			}
794 
795 			val = i << ICE_AQC_FW_LOG_ID_S;
796 			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
797 			data[chgs++] = cpu_to_le16(val);
798 		}
799 
800 		/* Only enable FW logging if at least one module is specified.
801 		 * If FW logging is currently enabled but all modules are not
802 		 * enabled to emit log messages, disable FW logging altogether.
803 		 */
804 		if (actv_evnts) {
805 			/* Leave if there is effectively no change */
806 			if (!chgs)
807 				goto out;
808 
809 			if (hw->fw_log.cq_en)
810 				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
811 
812 			if (hw->fw_log.uart_en)
813 				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
814 
815 			buf = data;
816 			len = sizeof(*data) * chgs;
817 			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
818 		}
819 	}
820 
821 	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
822 	if (!status) {
823 		/* Update the current configuration to reflect events enabled.
824 		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
825 		 * logging mode is enabled for the device. They do not reflect
826 		 * actual modules being enabled to emit log messages. So, their
827 		 * values remain unchanged even when all modules are disabled.
828 		 */
829 		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
830 
831 		hw->fw_log.actv_evnts = actv_evnts;
832 		for (i = 0; i < cnt; i++) {
833 			u16 v, m;
834 
835 			if (!enable) {
836 				/* When disabling all FW logging events as part
837 				 * of device's de-initialization, the original
838 				 * configurations are retained, and can be used
839 				 * to reconfigure FW logging later if the device
840 				 * is re-initialized.
841 				 */
842 				hw->fw_log.evnts[i].cur = 0;
843 				continue;
844 			}
845 
846 			v = le16_to_cpu(data[i]);
847 			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
848 			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
849 		}
850 	}
851 
852 out:
853 	if (data)
854 		devm_kfree(ice_hw_to_dev(hw), data);
855 
856 	return status;
857 }
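
/* Illustrative configuration sketch (not part of the original file): per the
 * kernel-doc above, a caller enables FW logging over the Rx CQ by setting the
 * enable bit and the per-module "cfg" masks before calling ice_cfg_fw_log().
 * module_idx and event_mask below are placeholders, not real values.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_idx].cfg = event_mask;
 *	status = ice_cfg_fw_log(hw, true);
 */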
858 
859 /**
860  * ice_output_fw_log
861  * @hw: pointer to the HW struct
862  * @desc: pointer to the AQ message descriptor
863  * @buf: pointer to the buffer accompanying the AQ message
864  *
865  * Formats a FW Log message and outputs it via the standard driver logs.
866  */
867 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
868 {
869 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
870 	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
871 			le16_to_cpu(desc->datalen));
872 	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
873 }
874 
875 /**
876  * ice_get_itr_intrl_gran
877  * @hw: pointer to the HW struct
878  *
879  * Determines the ITR/INTRL granularities based on the maximum aggregate
880  * bandwidth according to the device's configuration during power-on.
881  */
882 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
883 {
884 	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
885 			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
886 			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
887 
888 	switch (max_agg_bw) {
889 	case ICE_MAX_AGG_BW_200G:
890 	case ICE_MAX_AGG_BW_100G:
891 	case ICE_MAX_AGG_BW_50G:
892 		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
893 		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
894 		break;
895 	case ICE_MAX_AGG_BW_25G:
896 		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
897 		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
898 		break;
899 	}
900 }
901 
902 /**
903  * ice_init_hw - main hardware initialization routine
904  * @hw: pointer to the hardware structure
905  */
906 int ice_init_hw(struct ice_hw *hw)
907 {
908 	struct ice_aqc_get_phy_caps_data *pcaps;
909 	u16 mac_buf_len;
910 	void *mac_buf;
911 	int status;
912 
913 	/* Set MAC type based on DeviceID */
914 	status = ice_set_mac_type(hw);
915 	if (status)
916 		return status;
917 
918 	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
919 			 PF_FUNC_RID_FUNC_NUM_M) >>
920 		PF_FUNC_RID_FUNC_NUM_S;
921 
922 	status = ice_reset(hw, ICE_RESET_PFR);
923 	if (status)
924 		return status;
925 
926 	ice_get_itr_intrl_gran(hw);
927 
928 	status = ice_create_all_ctrlq(hw);
929 	if (status)
930 		goto err_unroll_cqinit;
931 
932 	/* Enable FW logging. Not fatal if this fails. */
933 	status = ice_cfg_fw_log(hw, true);
934 	if (status)
935 		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
936 
937 	status = ice_clear_pf_cfg(hw);
938 	if (status)
939 		goto err_unroll_cqinit;
940 
941 	/* Set bit to enable Flow Director filters */
942 	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
943 	INIT_LIST_HEAD(&hw->fdir_list_head);
944 
945 	ice_clear_pxe_mode(hw);
946 
947 	status = ice_init_nvm(hw);
948 	if (status)
949 		goto err_unroll_cqinit;
950 
951 	status = ice_get_caps(hw);
952 	if (status)
953 		goto err_unroll_cqinit;
954 
955 	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
956 				     sizeof(*hw->port_info), GFP_KERNEL);
957 	if (!hw->port_info) {
958 		status = -ENOMEM;
959 		goto err_unroll_cqinit;
960 	}
961 
962 	/* set the back pointer to HW */
963 	hw->port_info->hw = hw;
964 
965 	/* Initialize port_info struct with switch configuration data */
966 	status = ice_get_initial_sw_cfg(hw);
967 	if (status)
968 		goto err_unroll_alloc;
969 
970 	hw->evb_veb = true;
971 
972 	/* Query the allocated resources for Tx scheduler */
973 	status = ice_sched_query_res_alloc(hw);
974 	if (status) {
975 		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
976 		goto err_unroll_alloc;
977 	}
978 	ice_sched_get_psm_clk_freq(hw);
979 
980 	/* Initialize port_info struct with scheduler data */
981 	status = ice_sched_init_port(hw->port_info);
982 	if (status)
983 		goto err_unroll_sched;
984 
985 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
986 	if (!pcaps) {
987 		status = -ENOMEM;
988 		goto err_unroll_sched;
989 	}
990 
991 	/* Initialize port_info struct with PHY capabilities */
992 	status = ice_aq_get_phy_caps(hw->port_info, false,
993 				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
994 				     NULL);
995 	devm_kfree(ice_hw_to_dev(hw), pcaps);
996 	if (status)
997 		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
998 			 status);
999 
1000 	/* Initialize port_info struct with link information */
1001 	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1002 	if (status)
1003 		goto err_unroll_sched;
1004 
1005 	/* need a valid SW entry point to build a Tx tree */
1006 	if (!hw->sw_entry_point_layer) {
1007 		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1008 		status = -EIO;
1009 		goto err_unroll_sched;
1010 	}
1011 	INIT_LIST_HEAD(&hw->agg_list);
1012 	/* Initialize max burst size */
1013 	if (!hw->max_burst_size)
1014 		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1015 
1016 	status = ice_init_fltr_mgmt_struct(hw);
1017 	if (status)
1018 		goto err_unroll_sched;
1019 
1020 	/* Get MAC information */
1021 	/* A single port can report up to two (LAN and WoL) addresses */
1022 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
1023 			       sizeof(struct ice_aqc_manage_mac_read_resp),
1024 			       GFP_KERNEL);
1025 	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1026 
1027 	if (!mac_buf) {
1028 		status = -ENOMEM;
1029 		goto err_unroll_fltr_mgmt_struct;
1030 	}
1031 
1032 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1033 	devm_kfree(ice_hw_to_dev(hw), mac_buf);
1034 
1035 	if (status)
1036 		goto err_unroll_fltr_mgmt_struct;
1037 	/* enable jumbo frame support at MAC level */
1038 	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1039 	if (status)
1040 		goto err_unroll_fltr_mgmt_struct;
1041 	/* Obtain counter base index which would be used by flow director */
1042 	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
1043 	if (status)
1044 		goto err_unroll_fltr_mgmt_struct;
1045 	status = ice_init_hw_tbls(hw);
1046 	if (status)
1047 		goto err_unroll_fltr_mgmt_struct;
1048 	mutex_init(&hw->tnl_lock);
1049 	return 0;
1050 
1051 err_unroll_fltr_mgmt_struct:
1052 	ice_cleanup_fltr_mgmt_struct(hw);
1053 err_unroll_sched:
1054 	ice_sched_cleanup_all(hw);
1055 err_unroll_alloc:
1056 	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1057 err_unroll_cqinit:
1058 	ice_destroy_all_ctrlq(hw);
1059 	return status;
1060 }
1061 
1062 /**
1063  * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1064  * @hw: pointer to the hardware structure
1065  *
1066  * This should be called only during nominal operation, not as a result of
1067  * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
1068  * applicable initializations if it fails for any reason.
1069  */
1070 void ice_deinit_hw(struct ice_hw *hw)
1071 {
1072 	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1073 	ice_cleanup_fltr_mgmt_struct(hw);
1074 
1075 	ice_sched_cleanup_all(hw);
1076 	ice_sched_clear_agg(hw);
1077 	ice_free_seg(hw);
1078 	ice_free_hw_tbls(hw);
1079 	mutex_destroy(&hw->tnl_lock);
1080 
1081 	if (hw->port_info) {
1082 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1083 		hw->port_info = NULL;
1084 	}
1085 
1086 	/* Attempt to disable FW logging before shutting down control queues */
1087 	ice_cfg_fw_log(hw, false);
1088 	ice_destroy_all_ctrlq(hw);
1089 
1090 	/* Clear VSI contexts if not already cleared */
1091 	ice_clear_all_vsi_ctx(hw);
1092 }
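
/* Illustrative pairing sketch (not part of the original file): per the
 * kernel-doc above, ice_deinit_hw() only unrolls a successful ice_init_hw();
 * a failed ice_init_hw() cleans up after itself.
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;
 *	(... nominal operation ...)
 *	ice_deinit_hw(hw);
 */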
1093 
1094 /**
1095  * ice_check_reset - Check to see if a global reset is complete
1096  * @hw: pointer to the hardware structure
1097  */
1098 int ice_check_reset(struct ice_hw *hw)
1099 {
1100 	u32 cnt, reg = 0, grst_timeout, uld_mask;
1101 
1102 	/* Poll for Device Active state in case a recent CORER, GLOBR,
1103 	 * or EMPR has occurred. The grst delay value is in 100ms units.
1104 	 * Add 1sec for outstanding AQ commands that can take a long time.
1105 	 */
1106 	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1107 			GLGEN_RSTCTL_GRSTDEL_S) + 10;
1108 
1109 	for (cnt = 0; cnt < grst_timeout; cnt++) {
1110 		mdelay(100);
1111 		reg = rd32(hw, GLGEN_RSTAT);
1112 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1113 			break;
1114 	}
1115 
1116 	if (cnt == grst_timeout) {
1117 		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1118 		return -EIO;
1119 	}
1120 
1121 #define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
1122 				 GLNVM_ULD_PCIER_DONE_1_M |\
1123 				 GLNVM_ULD_CORER_DONE_M |\
1124 				 GLNVM_ULD_GLOBR_DONE_M |\
1125 				 GLNVM_ULD_POR_DONE_M |\
1126 				 GLNVM_ULD_POR_DONE_1_M |\
1127 				 GLNVM_ULD_PCIER_DONE_2_M)
1128 
1129 	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
1130 					  GLNVM_ULD_PE_DONE_M : 0);
1131 
1132 	/* Device is Active; check Global Reset processes are done */
1133 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1134 		reg = rd32(hw, GLNVM_ULD) & uld_mask;
1135 		if (reg == uld_mask) {
1136 			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1137 			break;
1138 		}
1139 		mdelay(10);
1140 	}
1141 
1142 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1143 		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1144 			  reg);
1145 		return -EIO;
1146 	}
1147 
1148 	return 0;
1149 }
1150 
1151 /**
1152  * ice_pf_reset - Reset the PF
1153  * @hw: pointer to the hardware structure
1154  *
1155  * If a global reset has been triggered, this function checks
1156  * for its completion and then issues the PF reset
1157  */
1158 static int ice_pf_reset(struct ice_hw *hw)
1159 {
1160 	u32 cnt, reg;
1161 
1162 	/* If at function entry a global reset was already in progress, i.e.
1163 	 * state is not 'device active' or any of the reset done bits are not
1164 	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1165 	 * global reset is done.
1166 	 */
1167 	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1168 	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1169 		/* poll on global reset currently in progress until done */
1170 		if (ice_check_reset(hw))
1171 			return -EIO;
1172 
1173 		return 0;
1174 	}
1175 
1176 	/* Reset the PF */
1177 	reg = rd32(hw, PFGEN_CTRL);
1178 
1179 	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1180 
1181 	/* Wait for the PFR to complete. The wait time is the global config lock
1182 	 * timeout plus the PFR timeout which will account for a possible reset
1183 	 * that is occurring during a download package operation.
1184 	 */
1185 	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1186 	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
1187 		reg = rd32(hw, PFGEN_CTRL);
1188 		if (!(reg & PFGEN_CTRL_PFSWR_M))
1189 			break;
1190 
1191 		mdelay(1);
1192 	}
1193 
1194 	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1195 		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1196 		return -EIO;
1197 	}
1198 
1199 	return 0;
1200 }
1201 
1202 /**
1203  * ice_reset - Perform different types of reset
1204  * @hw: pointer to the hardware structure
1205  * @req: reset request
1206  *
1207  * This function triggers a reset as specified by the req parameter.
1208  *
1209  * Note:
1210  * If anything other than a PF reset is triggered, PXE mode is restored.
1211  * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1212  * interface has been restored in the rebuild flow.
1213  */
1214 int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1215 {
1216 	u32 val = 0;
1217 
1218 	switch (req) {
1219 	case ICE_RESET_PFR:
1220 		return ice_pf_reset(hw);
1221 	case ICE_RESET_CORER:
1222 		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1223 		val = GLGEN_RTRIG_CORER_M;
1224 		break;
1225 	case ICE_RESET_GLOBR:
1226 		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1227 		val = GLGEN_RTRIG_GLOBR_M;
1228 		break;
1229 	default:
1230 		return -EINVAL;
1231 	}
1232 
1233 	val |= rd32(hw, GLGEN_RTRIG);
1234 	wr32(hw, GLGEN_RTRIG, val);
1235 	ice_flush(hw);
1236 
1237 	/* wait for the FW to be ready */
1238 	return ice_check_reset(hw);
1239 }
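
/* Illustrative usage sketch (not part of the original file): request a CoreR
 * and wait for the FW to become ready again. Per the note above, after any
 * reset other than a PFR the rebuild flow must restore the AQ interface and
 * then call ice_clear_pxe_mode() again.
 *
 *	status = ice_reset(hw, ICE_RESET_CORER);
 *	if (status)
 *		ice_debug(hw, ICE_DBG_INIT, "CoreR failed or timed out\n");
 */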
1240 
1241 /**
1242  * ice_copy_rxq_ctx_to_hw
1243  * @hw: pointer to the hardware structure
1244  * @ice_rxq_ctx: pointer to the rxq context
1245  * @rxq_index: the index of the Rx queue
1246  *
1247  * Copies rxq context from dense structure to HW register space
1248  */
1249 static int
1250 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1251 {
1252 	u8 i;
1253 
1254 	if (!ice_rxq_ctx)
1255 		return -EINVAL;
1256 
1257 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1258 		return -EINVAL;
1259 
1260 	/* Copy each dword separately to HW */
1261 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1262 		wr32(hw, QRX_CONTEXT(i, rxq_index),
1263 		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1264 
1265 		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1266 			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 /* LAN Rx Queue Context */
1273 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1274 	/* Field		Width	LSB */
1275 	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1276 	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1277 	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1278 	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1279 	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1280 	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1281 	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1282 	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1283 	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1284 	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1285 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1286 	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1287 	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1288 	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1289 	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1290 	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1291 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1292 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1293 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1294 	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1295 	{ 0 }
1296 };
1297 
1298 /**
1299  * ice_write_rxq_ctx
1300  * @hw: pointer to the hardware structure
1301  * @rlan_ctx: pointer to the rxq context
1302  * @rxq_index: the index of the Rx queue
1303  *
1304  * Converts rxq context from sparse to dense structure, writes it to HW
1305  * register space, and enables the hardware to prefetch descriptors instead
1306  * of only fetching them on demand.
1307  */
1308 int
1309 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1310 		  u32 rxq_index)
1311 {
1312 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1313 
1314 	if (!rlan_ctx)
1315 		return -EINVAL;
1316 
1317 	rlan_ctx->prefena = 1;
1318 
1319 	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1320 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1321 }
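
/* Illustrative usage sketch (not part of the original file): fill a sparse
 * ice_rlan_ctx and program Rx queue 0. The right-shifts assume the base
 * address and buffer length are encoded in 128-byte units; ring_dma,
 * ring_count and rx_buf_len are placeholder values. See the driver's Rx queue
 * setup in ice_base.c for the actual field encodings.
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, 0);
 */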
1322 
1323 /* LAN Tx Queue Context */
1324 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1325 				    /* Field			Width	LSB */
1326 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1327 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1328 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1329 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1330 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1331 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1332 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1333 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1334 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1335 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1336 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1337 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1338 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1339 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1340 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1341 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1342 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1343 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1344 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1345 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1346 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1347 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1348 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1349 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1350 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1351 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1352 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1353 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1354 	{ 0 }
1355 };
1356 
1357 /* Sideband Queue command wrappers */
1358 
1359 /**
1360  * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1361  * @hw: pointer to the HW struct
1362  * @desc: descriptor describing the command
1363  * @buf: buffer to use for indirect commands (NULL for direct commands)
1364  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1365  * @cd: pointer to command details structure
1366  */
1367 static int
1368 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1369 		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1370 {
1371 	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
1372 			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
1373 }
1374 
1375 /**
1376  * ice_sbq_rw_reg - Fill Sideband Queue command
1377  * @hw: pointer to the HW struct
1378  * @in: message info to be filled in descriptor
1379  */
1380 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1381 {
1382 	struct ice_sbq_cmd_desc desc = {0};
1383 	struct ice_sbq_msg_req msg = {0};
1384 	u16 msg_len;
1385 	int status;
1386 
1387 	msg_len = sizeof(msg);
1388 
1389 	msg.dest_dev = in->dest_dev;
1390 	msg.opcode = in->opcode;
1391 	msg.flags = ICE_SBQ_MSG_FLAGS;
1392 	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1393 	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1394 	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1395 
1396 	if (in->opcode)
1397 		msg.data = cpu_to_le32(in->data);
1398 	else
1399 		/* data read comes back in completion, so shorten the struct by
1400 		 * sizeof(msg.data)
1401 		 */
1402 		msg_len -= sizeof(msg.data);
1403 
1404 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
1405 	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1406 	desc.param0.cmd_len = cpu_to_le16(msg_len);
1407 	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1408 	if (!status && !in->opcode)
1409 		in->data = le32_to_cpu
1410 			(((struct ice_sbq_msg_cmpl *)&msg)->data);
1411 	return status;
1412 }
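
/* Illustrative usage sketch (not part of the original file): read a register
 * on a sideband device. Consistent with the handling above, a zero opcode is
 * treated as a read and the data comes back in in.data; a nonzero opcode
 * writes in.data. target_dev, reg_addr and reg_val are placeholders, and the
 * low/high address split shown is an assumption.
 *
 *	struct ice_sbq_msg_input in = {};
 *
 *	in.dest_dev = target_dev;
 *	in.opcode = 0;
 *	in.msg_addr_low = reg_addr & 0xffff;
 *	in.msg_addr_high = reg_addr >> 16;
 *	if (!ice_sbq_rw_reg(hw, &in))
 *		reg_val = in.data;
 */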
1413 
1414 /* FW Admin Queue command wrappers */
1415 
1416 /* Software lock/mutex that is meant to be held while the Global Config Lock
1417  * in firmware is acquired by the software to prevent most (but not all) types
1418  * of AQ commands from being sent to FW
1419  */
1420 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1421 
1422 /**
1423  * ice_should_retry_sq_send_cmd
1424  * @opcode: AQ opcode
1425  *
1426  * Decide if we should retry the send command routine for the ATQ, depending
1427  * on the opcode.
1428  */
1429 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1430 {
1431 	switch (opcode) {
1432 	case ice_aqc_opc_get_link_topo:
1433 	case ice_aqc_opc_lldp_stop:
1434 	case ice_aqc_opc_lldp_start:
1435 	case ice_aqc_opc_lldp_filter_ctrl:
1436 		return true;
1437 	}
1438 
1439 	return false;
1440 }
1441 
1442 /**
1443  * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1444  * @hw: pointer to the HW struct
1445  * @cq: pointer to the specific Control queue
1446  * @desc: prefilled descriptor describing the command
1447  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1448  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1449  * @cd: pointer to command details structure
1450  *
1451  * Retry sending the FW Admin Queue command multiple times to the FW Admin
1452  * Queue if the EBUSY AQ error is returned.
1453  */
1454 static int
1455 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1456 		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
1457 		      struct ice_sq_cd *cd)
1458 {
1459 	struct ice_aq_desc desc_cpy;
1460 	bool is_cmd_for_retry;
1461 	u8 *buf_cpy = NULL;
1462 	u8 idx = 0;
1463 	u16 opcode;
1464 	int status;
1465 
1466 	opcode = le16_to_cpu(desc->opcode);
1467 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1468 	memset(&desc_cpy, 0, sizeof(desc_cpy));
1469 
1470 	if (is_cmd_for_retry) {
1471 		if (buf) {
1472 			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1473 			if (!buf_cpy)
1474 				return -ENOMEM;
1475 		}
1476 
1477 		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1478 	}
1479 
1480 	do {
1481 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1482 
1483 		if (!is_cmd_for_retry || !status ||
1484 		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1485 			break;
1486 
1487 		if (buf_cpy)
1488 			memcpy(buf, buf_cpy, buf_size);
1489 
1490 		memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1491 
1492 		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1493 
1494 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1495 
1496 	kfree(buf_cpy);
1497 
1498 	return status;
1499 }
1500 
1501 /**
1502  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1503  * @hw: pointer to the HW struct
1504  * @desc: descriptor describing the command
1505  * @buf: buffer to use for indirect commands (NULL for direct commands)
1506  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1507  * @cd: pointer to command details structure
1508  *
1509  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1510  */
1511 int
1512 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1513 		u16 buf_size, struct ice_sq_cd *cd)
1514 {
1515 	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1516 	bool lock_acquired = false;
1517 	int status;
1518 
1519 	/* When a package download is in process (i.e. when the firmware's
1520 	 * Global Configuration Lock resource is held), only the Download
1521 	 * Package, Get Version, Get Package Info List and Release Resource
1522 	 * (with resource ID set to Global Config Lock) AdminQ commands are
1523 	 * allowed; all others must block until the package download completes
1524 	 * and the Global Config Lock is released.  See also
1525 	 * ice_acquire_global_cfg_lock().
1526 	 */
1527 	switch (le16_to_cpu(desc->opcode)) {
1528 	case ice_aqc_opc_download_pkg:
1529 	case ice_aqc_opc_get_pkg_info_list:
1530 	case ice_aqc_opc_get_ver:
1531 		break;
1532 	case ice_aqc_opc_release_res:
1533 		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1534 			break;
1535 		fallthrough;
1536 	default:
1537 		mutex_lock(&ice_global_cfg_lock_sw);
1538 		lock_acquired = true;
1539 		break;
1540 	}
1541 
1542 	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1543 	if (lock_acquired)
1544 		mutex_unlock(&ice_global_cfg_lock_sw);
1545 
1546 	return status;
1547 }
1548 
1549 /**
1550  * ice_aq_get_fw_ver
1551  * @hw: pointer to the HW struct
1552  * @cd: pointer to command details structure or NULL
1553  *
1554  * Get the firmware version (0x0001) from the admin queue commands
1555  */
1556 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1557 {
1558 	struct ice_aqc_get_ver *resp;
1559 	struct ice_aq_desc desc;
1560 	int status;
1561 
1562 	resp = &desc.params.get_ver;
1563 
1564 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1565 
1566 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1567 
1568 	if (!status) {
1569 		hw->fw_branch = resp->fw_branch;
1570 		hw->fw_maj_ver = resp->fw_major;
1571 		hw->fw_min_ver = resp->fw_minor;
1572 		hw->fw_patch = resp->fw_patch;
1573 		hw->fw_build = le32_to_cpu(resp->fw_build);
1574 		hw->api_branch = resp->api_branch;
1575 		hw->api_maj_ver = resp->api_major;
1576 		hw->api_min_ver = resp->api_minor;
1577 		hw->api_patch = resp->api_patch;
1578 	}
1579 
1580 	return status;
1581 }
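
/* Illustrative usage sketch (not part of the original file): retrieve the FW
 * and AQ API versions, which this call also caches in the HW struct.
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL))
 *		ice_debug(hw, ICE_DBG_INIT, "FW %u.%u.%u, API %u.%u\n",
 *			  hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
 *			  hw->api_maj_ver, hw->api_min_ver);
 */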
1582 
1583 /**
1584  * ice_aq_send_driver_ver
1585  * @hw: pointer to the HW struct
1586  * @dv: driver's major, minor version
1587  * @cd: pointer to command details structure or NULL
1588  *
1589  * Send the driver version (0x0002) to the firmware
1590  */
1591 int
1592 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1593 		       struct ice_sq_cd *cd)
1594 {
1595 	struct ice_aqc_driver_ver *cmd;
1596 	struct ice_aq_desc desc;
1597 	u16 len;
1598 
1599 	cmd = &desc.params.driver_ver;
1600 
1601 	if (!dv)
1602 		return -EINVAL;
1603 
1604 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1605 
1606 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1607 	cmd->major_ver = dv->major_ver;
1608 	cmd->minor_ver = dv->minor_ver;
1609 	cmd->build_ver = dv->build_ver;
1610 	cmd->subbuild_ver = dv->subbuild_ver;
1611 
1612 	len = 0;
1613 	while (len < sizeof(dv->driver_string) &&
1614 	       isascii(dv->driver_string[len]) && dv->driver_string[len])
1615 		len++;
1616 
1617 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1618 }
1619 
1620 /**
1621  * ice_aq_q_shutdown
1622  * @hw: pointer to the HW struct
1623  * @unloading: is the driver unloading itself
1624  *
1625  * Tell the Firmware that we're shutting down the AdminQ and whether
1626  * or not the driver is unloading as well (0x0003).
1627  */
1628 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1629 {
1630 	struct ice_aqc_q_shutdown *cmd;
1631 	struct ice_aq_desc desc;
1632 
1633 	cmd = &desc.params.q_shutdown;
1634 
1635 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1636 
1637 	if (unloading)
1638 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1639 
1640 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1641 }
1642 
1643 /**
1644  * ice_aq_req_res
1645  * @hw: pointer to the HW struct
1646  * @res: resource ID
1647  * @access: access type
1648  * @sdp_number: resource number
1649  * @timeout: the maximum time in ms that the driver may hold the resource
1650  * @cd: pointer to command details structure or NULL
1651  *
1652  * Requests common resource using the admin queue commands (0x0008).
1653  * When attempting to acquire the Global Config Lock, the driver can
1654  * learn of three states:
1655  *  1) 0 -         acquired lock, and can perform download package
1656  *  2) -EIO -      did not get lock, driver should fail to load
1657  *  3) -EALREADY - did not get lock, but another driver has
1658  *                 successfully downloaded the package; the driver does
1659  *                 not have to download the package and can continue
1660  *                 loading
1661  *
1662  * Note that if the caller is in an acquire lock, perform action, release lock
1663  * phase of operation, it is possible that the FW may detect a timeout and issue
1664  * a CORER. In this case, the driver will receive a CORER interrupt and will
1665  * have to determine its cause. The calling thread that is handling this flow
1666  * will likely get an error propagated back to it indicating the Download
1667  * Package, Update Package or the Release Resource AQ commands timed out.
1668  */
1669 static int
1670 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1671 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1672 	       struct ice_sq_cd *cd)
1673 {
1674 	struct ice_aqc_req_res *cmd_resp;
1675 	struct ice_aq_desc desc;
1676 	int status;
1677 
1678 	cmd_resp = &desc.params.res_owner;
1679 
1680 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1681 
1682 	cmd_resp->res_id = cpu_to_le16(res);
1683 	cmd_resp->access_type = cpu_to_le16(access);
1684 	cmd_resp->res_number = cpu_to_le32(sdp_number);
1685 	cmd_resp->timeout = cpu_to_le32(*timeout);
1686 	*timeout = 0;
1687 
1688 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1689 
1690 	/* The completion specifies the maximum time in ms that the driver
1691 	 * may hold the resource in the Timeout field.
1692 	 */
1693 
1694 	/* Global config lock response utilizes an additional status field.
1695 	 *
1696 	 * If the Global config lock resource is held by some other driver, the
1697 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1698 	 * and the timeout field indicates the maximum time the current owner
1699 	 * of the resource has to free it.
1700 	 */
1701 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1702 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1703 			*timeout = le32_to_cpu(cmd_resp->timeout);
1704 			return 0;
1705 		} else if (le16_to_cpu(cmd_resp->status) ==
1706 			   ICE_AQ_RES_GLBL_IN_PROG) {
1707 			*timeout = le32_to_cpu(cmd_resp->timeout);
1708 			return -EIO;
1709 		} else if (le16_to_cpu(cmd_resp->status) ==
1710 			   ICE_AQ_RES_GLBL_DONE) {
1711 			return -EALREADY;
1712 		}
1713 
1714 		/* invalid FW response, force a timeout immediately */
1715 		*timeout = 0;
1716 		return -EIO;
1717 	}
1718 
1719 	/* If the resource is held by some other driver, the command completes
1720 	 * with a busy return value and the timeout field indicates the maximum
1721 	 * time the current owner of the resource has to free it.
1722 	 */
1723 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1724 		*timeout = le32_to_cpu(cmd_resp->timeout);
1725 
1726 	return status;
1727 }
1728 
1729 /**
1730  * ice_aq_release_res
1731  * @hw: pointer to the HW struct
1732  * @res: resource ID
1733  * @sdp_number: resource number
1734  * @cd: pointer to command details structure or NULL
1735  *
1736  * release common resource using the admin queue commands (0x0009)
1737  */
1738 static int
1739 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1740 		   struct ice_sq_cd *cd)
1741 {
1742 	struct ice_aqc_req_res *cmd;
1743 	struct ice_aq_desc desc;
1744 
1745 	cmd = &desc.params.res_owner;
1746 
1747 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1748 
1749 	cmd->res_id = cpu_to_le16(res);
1750 	cmd->res_number = cpu_to_le32(sdp_number);
1751 
1752 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1753 }
1754 
1755 /**
1756  * ice_acquire_res
1757  * @hw: pointer to the HW structure
1758  * @res: resource ID
1759  * @access: access type (read or write)
1760  * @timeout: timeout in milliseconds
1761  *
1762  * This function will attempt to acquire the ownership of a resource.
1763  */
1764 int
1765 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1766 		enum ice_aq_res_access_type access, u32 timeout)
1767 {
1768 #define ICE_RES_POLLING_DELAY_MS	10
1769 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1770 	u32 time_left = timeout;
1771 	int status;
1772 
1773 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1774 
1775 	/* A return code of -EALREADY means that another driver has
1776 	 * previously acquired the resource and performed any necessary updates;
1777 	 * in this case the caller does not obtain the resource and has no
1778 	 * further work to do.
1779 	 */
1780 	if (status == -EALREADY)
1781 		goto ice_acquire_res_exit;
1782 
1783 	if (status)
1784 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1785 
1786 	/* If necessary, poll until the current lock owner times out */
1787 	timeout = time_left;
1788 	while (status && timeout && time_left) {
1789 		mdelay(delay);
1790 		timeout = (timeout > delay) ? timeout - delay : 0;
1791 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1792 
1793 		if (status == -EALREADY)
1794 			/* lock free, but no work to do */
1795 			break;
1796 
1797 		if (!status)
1798 			/* lock acquired */
1799 			break;
1800 	}
1801 	if (status && status != -EALREADY)
1802 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1803 
1804 ice_acquire_res_exit:
1805 	if (status == -EALREADY) {
1806 		if (access == ICE_RES_WRITE)
1807 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1808 		else
1809 			ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
1810 	}
1811 	return status;
1812 }
1813 
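/* Illustrative only (not part of this file): a minimal sketch of the
 * acquire/use/release flow described above, using the Global Config Lock
 * and its three possible outcomes. The 3000 ms timeout is a placeholder,
 * and a valid struct ice_hw *hw is assumed.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, 3000);
 *	if (!status) {
 *		(download the package, then release the lock)
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == -EALREADY) {
 *		(another driver already downloaded the package; continue)
 *	} else {
 *		(lock not obtained; fail to load)
 *	}
 */
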
1814 /**
1815  * ice_release_res
1816  * @hw: pointer to the HW structure
1817  * @res: resource ID
1818  *
1819  * This function will release a resource using the proper Admin Command.
1820  */
1821 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1822 {
1823 	u32 total_delay = 0;
1824 	int status;
1825 
1826 	status = ice_aq_release_res(hw, res, 0, NULL);
1827 
1828 	/* there are some rare cases where trying to release the resource
1829 	 * results in an admin queue timeout; in that case, retry the release
1830 	 */
1831 	while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
1832 		mdelay(1);
1833 		status = ice_aq_release_res(hw, res, 0, NULL);
1834 		total_delay++;
1835 	}
1836 }
1837 
1838 /**
1839  * ice_aq_alloc_free_res - command to allocate/free resources
1840  * @hw: pointer to the HW struct
1841  * @num_entries: number of resource entries in buffer
1842  * @buf: Indirect buffer to hold data parameters and response
1843  * @buf_size: size of buffer for indirect commands
1844  * @opc: pass in the command opcode
1845  * @cd: pointer to command details structure or NULL
1846  *
1847  * Helper function to allocate/free resources using the admin queue commands
1848  */
1849 int
1850 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1851 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1852 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1853 {
1854 	struct ice_aqc_alloc_free_res_cmd *cmd;
1855 	struct ice_aq_desc desc;
1856 
1857 	cmd = &desc.params.sw_res_ctrl;
1858 
1859 	if (!buf)
1860 		return -EINVAL;
1861 
1862 	if (buf_size < flex_array_size(buf, elem, num_entries))
1863 		return -EINVAL;
1864 
1865 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1866 
1867 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1868 
1869 	cmd->num_entries = cpu_to_le16(num_entries);
1870 
1871 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1872 }
1873 
1874 /**
1875  * ice_alloc_hw_res - allocate resource
1876  * @hw: pointer to the HW struct
1877  * @type: type of resource
1878  * @num: number of resources to allocate
1879  * @btm: allocate from bottom
1880  * @res: pointer to array that will receive the resources
1881  */
1882 int
1883 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1884 {
1885 	struct ice_aqc_alloc_free_res_elem *buf;
1886 	u16 buf_len;
1887 	int status;
1888 
1889 	buf_len = struct_size(buf, elem, num);
1890 	buf = kzalloc(buf_len, GFP_KERNEL);
1891 	if (!buf)
1892 		return -ENOMEM;
1893 
1894 	/* Prepare buffer to allocate resource. */
1895 	buf->num_elems = cpu_to_le16(num);
1896 	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1897 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1898 	if (btm)
1899 		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1900 
1901 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1902 				       ice_aqc_opc_alloc_res, NULL);
1903 	if (status)
1904 		goto ice_alloc_res_exit;
1905 
1906 	memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1907 
1908 ice_alloc_res_exit:
1909 	kfree(buf);
1910 	return status;
1911 }
1912 
1913 /**
1914  * ice_free_hw_res - free allocated HW resource
1915  * @hw: pointer to the HW struct
1916  * @type: type of resource to free
1917  * @num: number of resources
1918  * @res: pointer to array that contains the resources to free
1919  */
1920 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1921 {
1922 	struct ice_aqc_alloc_free_res_elem *buf;
1923 	u16 buf_len;
1924 	int status;
1925 
1926 	buf_len = struct_size(buf, elem, num);
1927 	buf = kzalloc(buf_len, GFP_KERNEL);
1928 	if (!buf)
1929 		return -ENOMEM;
1930 
1931 	/* Prepare buffer to free resource. */
1932 	buf->num_elems = cpu_to_le16(num);
1933 	buf->res_type = cpu_to_le16(type);
1934 	memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1935 
1936 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1937 				       ice_aqc_opc_free_res, NULL);
1938 	if (status)
1939 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1940 
1941 	kfree(buf);
1942 	return status;
1943 }
1944 
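/* Illustrative only (not part of this file): a minimal sketch of pairing
 * ice_alloc_hw_res() with ice_free_hw_res(). The resource type 'type' is a
 * placeholder for a firmware resource type value, and a valid struct
 * ice_hw *hw is assumed.
 *
 *	u16 res_ids[2];
 *
 *	if (!ice_alloc_hw_res(hw, type, 2, false, res_ids)) {
 *		(use res_ids[0] and res_ids[1])
 *		ice_free_hw_res(hw, type, 2, res_ids);
 *	}
 */
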
1945 /**
1946  * ice_get_num_per_func - determine number of resources per PF
1947  * @hw: pointer to the HW structure
1948  * @max: value to be evenly split between each PF
1949  *
1950  * Determine the number of valid functions by going through the bitmap returned
1951  * from parsing capabilities and use this to calculate the number of resources
1952  * per PF based on the max value passed in.
1953  */
1954 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1955 {
1956 	u8 funcs;
1957 
1958 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1959 	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1960 			 ICE_CAPS_VALID_FUNCS_M);
1961 
1962 	if (!funcs)
1963 		return 0;
1964 
1965 	return max / funcs;
1966 }
1967 
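/* Illustrative only: with a valid_functions bitmap of 0x3 (two PFs) and a
 * device-wide maximum of, say, 768 entries, ice_get_num_per_func() returns
 * 768 / 2 = 384 entries per PF. If no valid functions are reported, it
 * returns 0, which callers must be prepared to handle.
 */
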
1968 /**
1969  * ice_parse_common_caps - parse common device/function capabilities
1970  * @hw: pointer to the HW struct
1971  * @caps: pointer to common capabilities structure
1972  * @elem: the capability element to parse
1973  * @prefix: message prefix for tracing capabilities
1974  *
1975  * Given a capability element, extract relevant details into the common
1976  * capability structure.
1977  *
1978  * Returns: true if the capability matches one of the common capability ids,
1979  * false otherwise.
1980  */
1981 static bool
1982 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1983 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
1984 {
1985 	u32 logical_id = le32_to_cpu(elem->logical_id);
1986 	u32 phys_id = le32_to_cpu(elem->phys_id);
1987 	u32 number = le32_to_cpu(elem->number);
1988 	u16 cap = le16_to_cpu(elem->cap);
1989 	bool found = true;
1990 
1991 	switch (cap) {
1992 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
1993 		caps->valid_functions = number;
1994 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1995 			  caps->valid_functions);
1996 		break;
1997 	case ICE_AQC_CAPS_SRIOV:
1998 		caps->sr_iov_1_1 = (number == 1);
1999 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2000 			  caps->sr_iov_1_1);
2001 		break;
2002 	case ICE_AQC_CAPS_DCB:
2003 		caps->dcb = (number == 1);
2004 		caps->active_tc_bitmap = logical_id;
2005 		caps->maxtc = phys_id;
2006 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2007 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2008 			  caps->active_tc_bitmap);
2009 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2010 		break;
2011 	case ICE_AQC_CAPS_RSS:
2012 		caps->rss_table_size = number;
2013 		caps->rss_table_entry_width = logical_id;
2014 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2015 			  caps->rss_table_size);
2016 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2017 			  caps->rss_table_entry_width);
2018 		break;
2019 	case ICE_AQC_CAPS_RXQS:
2020 		caps->num_rxq = number;
2021 		caps->rxq_first_id = phys_id;
2022 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2023 			  caps->num_rxq);
2024 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2025 			  caps->rxq_first_id);
2026 		break;
2027 	case ICE_AQC_CAPS_TXQS:
2028 		caps->num_txq = number;
2029 		caps->txq_first_id = phys_id;
2030 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2031 			  caps->num_txq);
2032 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2033 			  caps->txq_first_id);
2034 		break;
2035 	case ICE_AQC_CAPS_MSIX:
2036 		caps->num_msix_vectors = number;
2037 		caps->msix_vector_first_id = phys_id;
2038 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2039 			  caps->num_msix_vectors);
2040 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2041 			  caps->msix_vector_first_id);
2042 		break;
2043 	case ICE_AQC_CAPS_PENDING_NVM_VER:
2044 		caps->nvm_update_pending_nvm = true;
2045 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2046 		break;
2047 	case ICE_AQC_CAPS_PENDING_OROM_VER:
2048 		caps->nvm_update_pending_orom = true;
2049 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2050 		break;
2051 	case ICE_AQC_CAPS_PENDING_NET_VER:
2052 		caps->nvm_update_pending_netlist = true;
2053 		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2054 		break;
2055 	case ICE_AQC_CAPS_NVM_MGMT:
2056 		caps->nvm_unified_update =
2057 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2058 			true : false;
2059 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2060 			  caps->nvm_unified_update);
2061 		break;
2062 	case ICE_AQC_CAPS_RDMA:
2063 		caps->rdma = (number == 1);
2064 		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2065 		break;
2066 	case ICE_AQC_CAPS_MAX_MTU:
2067 		caps->max_mtu = number;
2068 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2069 			  prefix, caps->max_mtu);
2070 		break;
2071 	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2072 		caps->pcie_reset_avoidance = (number > 0);
2073 		ice_debug(hw, ICE_DBG_INIT,
2074 			  "%s: pcie_reset_avoidance = %d\n", prefix,
2075 			  caps->pcie_reset_avoidance);
2076 		break;
2077 	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2078 		caps->reset_restrict_support = (number == 1);
2079 		ice_debug(hw, ICE_DBG_INIT,
2080 			  "%s: reset_restrict_support = %d\n", prefix,
2081 			  caps->reset_restrict_support);
2082 		break;
2083 	default:
2084 		/* Not one of the recognized common capabilities */
2085 		found = false;
2086 	}
2087 
2088 	return found;
2089 }
2090 
2091 /**
2092  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2093  * @hw: pointer to the HW structure
2094  * @caps: pointer to capabilities structure to fix
2095  *
2096  * Re-calculate the capabilities that are dependent on the number of physical
2097  * ports; i.e. some features are not supported or function differently on
2098  * devices with more than 4 ports.
2099  */
2100 static void
2101 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2102 {
2103 	/* This assumes device capabilities are always scanned before function
2104 	 * capabilities during the initialization flow.
2105 	 */
2106 	if (hw->dev_caps.num_funcs > 4) {
2107 		/* Max 4 TCs per port */
2108 		caps->maxtc = 4;
2109 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2110 			  caps->maxtc);
2111 		if (caps->rdma) {
2112 			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2113 			caps->rdma = 0;
2114 		}
2115 
2116 		/* print message only when processing device capabilities
2117 		 * during initialization.
2118 		 */
2119 		if (caps == &hw->dev_caps.common_cap)
2120 			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2121 	}
2122 }
2123 
2124 /**
2125  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2126  * @hw: pointer to the HW struct
2127  * @func_p: pointer to function capabilities structure
2128  * @cap: pointer to the capability element to parse
2129  *
2130  * Extract function capabilities for ICE_AQC_CAPS_VF.
2131  */
2132 static void
2133 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2134 		       struct ice_aqc_list_caps_elem *cap)
2135 {
2136 	u32 logical_id = le32_to_cpu(cap->logical_id);
2137 	u32 number = le32_to_cpu(cap->number);
2138 
2139 	func_p->num_allocd_vfs = number;
2140 	func_p->vf_base_id = logical_id;
2141 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2142 		  func_p->num_allocd_vfs);
2143 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2144 		  func_p->vf_base_id);
2145 }
2146 
2147 /**
2148  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2149  * @hw: pointer to the HW struct
2150  * @func_p: pointer to function capabilities structure
2151  * @cap: pointer to the capability element to parse
2152  *
2153  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2154  */
2155 static void
2156 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2157 			struct ice_aqc_list_caps_elem *cap)
2158 {
2159 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2160 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2161 		  le32_to_cpu(cap->number));
2162 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2163 		  func_p->guar_num_vsi);
2164 }
2165 
2166 /**
2167  * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2168  * @hw: pointer to the HW struct
2169  * @func_p: pointer to function capabilities structure
2170  * @cap: pointer to the capability element to parse
2171  *
2172  * Extract function capabilities for ICE_AQC_CAPS_1588.
2173  */
2174 static void
2175 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2176 			 struct ice_aqc_list_caps_elem *cap)
2177 {
2178 	struct ice_ts_func_info *info = &func_p->ts_func_info;
2179 	u32 number = le32_to_cpu(cap->number);
2180 
2181 	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2182 	func_p->common_cap.ieee_1588 = info->ena;
2183 
2184 	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2185 	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2186 	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2187 	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2188 
2189 	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2190 	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2191 
2192 	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2193 		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2194 	} else {
2195 		/* Unknown clock frequency, so assume a (probably incorrect)
2196 		 * default to avoid out-of-bounds look ups of frequency
2197 		 * default to avoid out-of-bounds lookups of frequency-related
2198 		 * information.
2199 		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2200 			  info->clk_freq);
2201 		info->time_ref = ICE_TIME_REF_FREQ_25_000;
2202 	}
2203 
2204 	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2205 		  func_p->common_cap.ieee_1588);
2206 	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2207 		  info->src_tmr_owned);
2208 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2209 		  info->tmr_ena);
2210 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2211 		  info->tmr_index_owned);
2212 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2213 		  info->tmr_index_assoc);
2214 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2215 		  info->clk_freq);
2216 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2217 		  info->clk_src);
2218 }
2219 
2220 /**
2221  * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2222  * @hw: pointer to the HW struct
2223  * @func_p: pointer to function capabilities structure
2224  *
2225  * Extract function capabilities for ICE_AQC_CAPS_FD.
2226  */
2227 static void
2228 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2229 {
2230 	u32 reg_val, val;
2231 
2232 	reg_val = rd32(hw, GLQF_FD_SIZE);
2233 	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2234 		GLQF_FD_SIZE_FD_GSIZE_S;
2235 	func_p->fd_fltr_guar =
2236 		ice_get_num_per_func(hw, val);
2237 	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2238 		GLQF_FD_SIZE_FD_BSIZE_S;
2239 	func_p->fd_fltr_best_effort = val;
2240 
2241 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2242 		  func_p->fd_fltr_guar);
2243 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2244 		  func_p->fd_fltr_best_effort);
2245 }
2246 
2247 /**
2248  * ice_parse_func_caps - Parse function capabilities
2249  * @hw: pointer to the HW struct
2250  * @func_p: pointer to function capabilities structure
2251  * @buf: buffer containing the function capability records
2252  * @cap_count: the number of capabilities
2253  *
2254  * Helper function to parse function (0x000A) capabilities list. For
2255  * capabilities shared between device and function, this relies on
2256  * ice_parse_common_caps.
2257  *
2258  * Loop through the list of provided capabilities and extract the relevant
2259  * data into the function capabilities structure.
2260  */
2261 static void
2262 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2263 		    void *buf, u32 cap_count)
2264 {
2265 	struct ice_aqc_list_caps_elem *cap_resp;
2266 	u32 i;
2267 
2268 	cap_resp = buf;
2269 
2270 	memset(func_p, 0, sizeof(*func_p));
2271 
2272 	for (i = 0; i < cap_count; i++) {
2273 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2274 		bool found;
2275 
2276 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2277 					      &cap_resp[i], "func caps");
2278 
2279 		switch (cap) {
2280 		case ICE_AQC_CAPS_VF:
2281 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2282 			break;
2283 		case ICE_AQC_CAPS_VSI:
2284 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2285 			break;
2286 		case ICE_AQC_CAPS_1588:
2287 			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2288 			break;
2289 		case ICE_AQC_CAPS_FD:
2290 			ice_parse_fdir_func_caps(hw, func_p);
2291 			break;
2292 		default:
2293 			/* Don't list common capabilities as unknown */
2294 			if (!found)
2295 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2296 					  i, cap);
2297 			break;
2298 		}
2299 	}
2300 
2301 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2302 }
2303 
2304 /**
2305  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2306  * @hw: pointer to the HW struct
2307  * @dev_p: pointer to device capabilities structure
2308  * @cap: capability element to parse
2309  *
2310  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2311  */
2312 static void
2313 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2314 			      struct ice_aqc_list_caps_elem *cap)
2315 {
2316 	u32 number = le32_to_cpu(cap->number);
2317 
2318 	dev_p->num_funcs = hweight32(number);
2319 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2320 		  dev_p->num_funcs);
2321 }
2322 
2323 /**
2324  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2325  * @hw: pointer to the HW struct
2326  * @dev_p: pointer to device capabilities structure
2327  * @cap: capability element to parse
2328  *
2329  * Parse ICE_AQC_CAPS_VF for device capabilities.
2330  */
2331 static void
2332 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2333 		      struct ice_aqc_list_caps_elem *cap)
2334 {
2335 	u32 number = le32_to_cpu(cap->number);
2336 
2337 	dev_p->num_vfs_exposed = number;
2338 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2339 		  dev_p->num_vfs_exposed);
2340 }
2341 
2342 /**
2343  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2344  * @hw: pointer to the HW struct
2345  * @dev_p: pointer to device capabilities structure
2346  * @cap: capability element to parse
2347  *
2348  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2349  */
2350 static void
2351 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2352 		       struct ice_aqc_list_caps_elem *cap)
2353 {
2354 	u32 number = le32_to_cpu(cap->number);
2355 
2356 	dev_p->num_vsi_allocd_to_host = number;
2357 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2358 		  dev_p->num_vsi_allocd_to_host);
2359 }
2360 
2361 /**
2362  * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2363  * @hw: pointer to the HW struct
2364  * @dev_p: pointer to device capabilities structure
2365  * @cap: capability element to parse
2366  *
2367  * Parse ICE_AQC_CAPS_1588 for device capabilities.
2368  */
2369 static void
2370 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2371 			struct ice_aqc_list_caps_elem *cap)
2372 {
2373 	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2374 	u32 logical_id = le32_to_cpu(cap->logical_id);
2375 	u32 phys_id = le32_to_cpu(cap->phys_id);
2376 	u32 number = le32_to_cpu(cap->number);
2377 
2378 	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2379 	dev_p->common_cap.ieee_1588 = info->ena;
2380 
2381 	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2382 	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2383 	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2384 
2385 	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2386 	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2387 	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2388 
2389 	info->ena_ports = logical_id;
2390 	info->tmr_own_map = phys_id;
2391 
2392 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2393 		  dev_p->common_cap.ieee_1588);
2394 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2395 		  info->tmr0_owner);
2396 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2397 		  info->tmr0_owned);
2398 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2399 		  info->tmr0_ena);
2400 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2401 		  info->tmr1_owner);
2402 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2403 		  info->tmr1_owned);
2404 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2405 		  info->tmr1_ena);
2406 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2407 		  info->ena_ports);
2408 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2409 		  info->tmr_own_map);
2410 }
2411 
2412 /**
2413  * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2414  * @hw: pointer to the HW struct
2415  * @dev_p: pointer to device capabilities structure
2416  * @cap: capability element to parse
2417  *
2418  * Parse ICE_AQC_CAPS_FD for device capabilities.
2419  */
2420 static void
2421 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2422 			struct ice_aqc_list_caps_elem *cap)
2423 {
2424 	u32 number = le32_to_cpu(cap->number);
2425 
2426 	dev_p->num_flow_director_fltr = number;
2427 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2428 		  dev_p->num_flow_director_fltr);
2429 }
2430 
2431 /**
2432  * ice_parse_dev_caps - Parse device capabilities
2433  * @hw: pointer to the HW struct
2434  * @dev_p: pointer to device capabilities structure
2435  * @buf: buffer containing the device capability records
2436  * @cap_count: the number of capabilities
2437  *
2438  * Helper function to parse the device (0x000B) capabilities list. For
2439  * capabilities shared between device and function, this relies on
2440  * ice_parse_common_caps.
2441  *
2442  * Loop through the list of provided capabilities and extract the relevant
2443  * data into the device capabilities structure.
2444  */
2445 static void
2446 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2447 		   void *buf, u32 cap_count)
2448 {
2449 	struct ice_aqc_list_caps_elem *cap_resp;
2450 	u32 i;
2451 
2452 	cap_resp = buf;
2453 
2454 	memset(dev_p, 0, sizeof(*dev_p));
2455 
2456 	for (i = 0; i < cap_count; i++) {
2457 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2458 		bool found;
2459 
2460 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2461 					      &cap_resp[i], "dev caps");
2462 
2463 		switch (cap) {
2464 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2465 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2466 			break;
2467 		case ICE_AQC_CAPS_VF:
2468 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2469 			break;
2470 		case ICE_AQC_CAPS_VSI:
2471 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2472 			break;
2473 		case ICE_AQC_CAPS_1588:
2474 			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2475 			break;
2476 		case ICE_AQC_CAPS_FD:
2477 			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2478 			break;
2479 		default:
2480 			/* Don't list common capabilities as unknown */
2481 			if (!found)
2482 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2483 					  i, cap);
2484 			break;
2485 		}
2486 	}
2487 
2488 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2489 }
2490 
2491 /**
2492  * ice_aq_list_caps - query function/device capabilities
2493  * @hw: pointer to the HW struct
2494  * @buf: a buffer to hold the capabilities
2495  * @buf_size: size of the buffer
2496  * @cap_count: if not NULL, set to the number of capabilities reported
2497  * @opc: capabilities type to discover, device or function
2498  * @cd: pointer to command details structure or NULL
2499  *
2500  * Get the function (0x000A) or device (0x000B) capabilities description from
2501  * firmware and store it in the buffer.
2502  *
2503  * If the cap_count pointer is not NULL, then it is set to the number of
2504  * capabilities firmware will report. Note that if the buffer size is too
2505  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2506  * cap_count will still be updated in this case. It is recommended that the
2507  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2508  * firmware could return) to avoid this.
2509  */
2510 int
2511 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2512 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2513 {
2514 	struct ice_aqc_list_caps *cmd;
2515 	struct ice_aq_desc desc;
2516 	int status;
2517 
2518 	cmd = &desc.params.get_cap;
2519 
2520 	if (opc != ice_aqc_opc_list_func_caps &&
2521 	    opc != ice_aqc_opc_list_dev_caps)
2522 		return -EINVAL;
2523 
2524 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2525 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2526 
2527 	if (cap_count)
2528 		*cap_count = le32_to_cpu(cmd->count);
2529 
2530 	return status;
2531 }
2532 
2533 /**
2534  * ice_discover_dev_caps - Read and extract device capabilities
2535  * @hw: pointer to the hardware structure
2536  * @dev_caps: pointer to device capabilities structure
2537  *
2538  * Read the device capabilities and extract them into the dev_caps structure
2539  * for later use.
2540  */
2541 int
2542 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2543 {
2544 	u32 cap_count = 0;
2545 	void *cbuf;
2546 	int status;
2547 
2548 	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2549 	if (!cbuf)
2550 		return -ENOMEM;
2551 
2552 	/* Although the driver doesn't know the number of capabilities the
2553 	 * device will return, we can simply send a 4KB buffer, the maximum
2554 	 * possible size that firmware can return.
2555 	 */
2556 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2557 
2558 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2559 				  ice_aqc_opc_list_dev_caps, NULL);
2560 	if (!status)
2561 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2562 	kfree(cbuf);
2563 
2564 	return status;
2565 }
2566 
2567 /**
2568  * ice_discover_func_caps - Read and extract function capabilities
2569  * @hw: pointer to the hardware structure
2570  * @func_caps: pointer to function capabilities structure
2571  *
2572  * Read the function capabilities and extract them into the func_caps structure
2573  * for later use.
2574  */
2575 static int
2576 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2577 {
2578 	u32 cap_count = 0;
2579 	void *cbuf;
2580 	int status;
2581 
2582 	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2583 	if (!cbuf)
2584 		return -ENOMEM;
2585 
2586 	/* Although the driver doesn't know the number of capabilities the
2587 	 * device will return, we can simply send a 4KB buffer, the maximum
2588 	 * possible size that firmware can return.
2589 	 */
2590 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2591 
2592 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2593 				  ice_aqc_opc_list_func_caps, NULL);
2594 	if (!status)
2595 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2596 	kfree(cbuf);
2597 
2598 	return status;
2599 }
2600 
2601 /**
2602  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2603  * @hw: pointer to the hardware structure
2604  */
2605 void ice_set_safe_mode_caps(struct ice_hw *hw)
2606 {
2607 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2608 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2609 	struct ice_hw_common_caps cached_caps;
2610 	u32 num_funcs;
2611 
2612 	/* cache some func_caps values that should be restored after memset */
2613 	cached_caps = func_caps->common_cap;
2614 
2615 	/* unset func capabilities */
2616 	memset(func_caps, 0, sizeof(*func_caps));
2617 
2618 #define ICE_RESTORE_FUNC_CAP(name) \
2619 	func_caps->common_cap.name = cached_caps.name
2620 
2621 	/* restore cached values */
2622 	ICE_RESTORE_FUNC_CAP(valid_functions);
2623 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2624 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2625 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2626 	ICE_RESTORE_FUNC_CAP(max_mtu);
2627 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2628 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2629 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2630 	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2631 
2632 	/* one Tx and one Rx queue in safe mode */
2633 	func_caps->common_cap.num_rxq = 1;
2634 	func_caps->common_cap.num_txq = 1;
2635 
2636 	/* two MSIX vectors, one for traffic and one for misc causes */
2637 	func_caps->common_cap.num_msix_vectors = 2;
2638 	func_caps->guar_num_vsi = 1;
2639 
2640 	/* cache some dev_caps values that should be restored after memset */
2641 	cached_caps = dev_caps->common_cap;
2642 	num_funcs = dev_caps->num_funcs;
2643 
2644 	/* unset dev capabilities */
2645 	memset(dev_caps, 0, sizeof(*dev_caps));
2646 
2647 #define ICE_RESTORE_DEV_CAP(name) \
2648 	dev_caps->common_cap.name = cached_caps.name
2649 
2650 	/* restore cached values */
2651 	ICE_RESTORE_DEV_CAP(valid_functions);
2652 	ICE_RESTORE_DEV_CAP(txq_first_id);
2653 	ICE_RESTORE_DEV_CAP(rxq_first_id);
2654 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2655 	ICE_RESTORE_DEV_CAP(max_mtu);
2656 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2657 	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2658 	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2659 	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2660 	dev_caps->num_funcs = num_funcs;
2661 
2662 	/* one Tx and one Rx queue per function in safe mode */
2663 	dev_caps->common_cap.num_rxq = num_funcs;
2664 	dev_caps->common_cap.num_txq = num_funcs;
2665 
2666 	/* two MSIX vectors per function */
2667 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2668 }
2669 
2670 /**
2671  * ice_get_caps - get info about the HW
2672  * @hw: pointer to the hardware structure
2673  */
2674 int ice_get_caps(struct ice_hw *hw)
2675 {
2676 	int status;
2677 
2678 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2679 	if (status)
2680 		return status;
2681 
2682 	return ice_discover_func_caps(hw, &hw->func_caps);
2683 }
2684 
2685 /**
2686  * ice_aq_manage_mac_write - manage MAC address write command
2687  * @hw: pointer to the HW struct
2688  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2689  * @flags: flags to control write behavior
2690  * @cd: pointer to command details structure or NULL
2691  *
2692  * This function is used to write a MAC address to the NVM (0x0108).
2693  */
2694 int
2695 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2696 			struct ice_sq_cd *cd)
2697 {
2698 	struct ice_aqc_manage_mac_write *cmd;
2699 	struct ice_aq_desc desc;
2700 
2701 	cmd = &desc.params.mac_write;
2702 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2703 
2704 	cmd->flags = flags;
2705 	ether_addr_copy(cmd->mac_addr, mac_addr);
2706 
2707 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2708 }
2709 
2710 /**
2711  * ice_aq_clear_pxe_mode
2712  * @hw: pointer to the HW struct
2713  *
2714  * Tell the firmware that the driver is taking over from PXE (0x0110).
2715  */
2716 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
2717 {
2718 	struct ice_aq_desc desc;
2719 
2720 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2721 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2722 
2723 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2724 }
2725 
2726 /**
2727  * ice_clear_pxe_mode - clear pxe operations mode
2728  * @hw: pointer to the HW struct
2729  *
2730  * Make sure all PXE mode settings are cleared, including things
2731  * like descriptor fetch/write-back mode.
2732  */
2733 void ice_clear_pxe_mode(struct ice_hw *hw)
2734 {
2735 	if (ice_check_sq_alive(hw, &hw->adminq))
2736 		ice_aq_clear_pxe_mode(hw);
2737 }
2738 
2739 /**
2740  * ice_get_link_speed_based_on_phy_type - returns link speed
2741  * @phy_type_low: lower part of phy_type
2742  * @phy_type_high: higher part of phy_type
2743  *
2744  * This helper function will convert an entry in the PHY type structure
2745  * [phy_type_low, phy_type_high] to its corresponding link speed.
2746  * Note: exactly one bit should be set across [phy_type_low, phy_type_high],
2747  * as this function converts a single PHY type to its corresponding
2748  * speed.
2749  * If no bit is set, ICE_LINK_SPEED_UNKNOWN is returned.
2750  * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN is returned.
2751  */
2752 static u16
2753 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2754 {
2755 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2756 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2757 
2758 	switch (phy_type_low) {
2759 	case ICE_PHY_TYPE_LOW_100BASE_TX:
2760 	case ICE_PHY_TYPE_LOW_100M_SGMII:
2761 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2762 		break;
2763 	case ICE_PHY_TYPE_LOW_1000BASE_T:
2764 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2765 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2766 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2767 	case ICE_PHY_TYPE_LOW_1G_SGMII:
2768 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2769 		break;
2770 	case ICE_PHY_TYPE_LOW_2500BASE_T:
2771 	case ICE_PHY_TYPE_LOW_2500BASE_X:
2772 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2773 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2774 		break;
2775 	case ICE_PHY_TYPE_LOW_5GBASE_T:
2776 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2777 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2778 		break;
2779 	case ICE_PHY_TYPE_LOW_10GBASE_T:
2780 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2781 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2782 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2783 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2784 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2785 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2786 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2787 		break;
2788 	case ICE_PHY_TYPE_LOW_25GBASE_T:
2789 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2790 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2791 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2792 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2793 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2794 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2795 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2796 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2797 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2798 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2799 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2800 		break;
2801 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2802 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2803 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2804 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2805 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2806 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2807 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2808 		break;
2809 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2810 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2811 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2812 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2813 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2814 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2815 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2816 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2817 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2818 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2819 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2820 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2821 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2822 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2823 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2824 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2825 		break;
2826 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2827 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2828 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2829 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2830 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2831 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2832 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2833 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2834 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2835 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2836 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2837 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2838 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2839 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2840 		break;
2841 	default:
2842 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2843 		break;
2844 	}
2845 
2846 	switch (phy_type_high) {
2847 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2848 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2849 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2850 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2851 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2852 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2853 		break;
2854 	default:
2855 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2856 		break;
2857 	}
2858 
2859 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2860 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2861 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2862 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2863 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2864 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2865 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2866 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2867 		return speed_phy_type_low;
2868 	else
2869 		return speed_phy_type_high;
2870 }
2871 
2872 /**
2873  * ice_update_phy_type
2874  * @phy_type_low: pointer to the lower part of phy_type
2875  * @phy_type_high: pointer to the higher part of phy_type
2876  * @link_speeds_bitmap: targeted link speeds bitmap
2877  *
2878  * Note: For the format of link_speeds_bitmap, see
2879  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2880  * link_speeds_bitmap that includes multiple speeds.
2881  *
2882  * Each bit in the [phy_type_low, phy_type_high] structure
2883  * represents a certain link speed. This helper function turns on bits
2884  * in the [phy_type_low, phy_type_high] structure based on the value of
2885  * the link_speeds_bitmap input parameter.
2886  */
2887 void
2888 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2889 		    u16 link_speeds_bitmap)
2890 {
2891 	u64 pt_high;
2892 	u64 pt_low;
2893 	int index;
2894 	u16 speed;
2895 
2896 	/* We first check with low part of phy_type */
2897 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2898 		pt_low = BIT_ULL(index);
2899 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2900 
2901 		if (link_speeds_bitmap & speed)
2902 			*phy_type_low |= BIT_ULL(index);
2903 	}
2904 
2905 	/* We then check with high part of phy_type */
2906 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2907 		pt_high = BIT_ULL(index);
2908 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2909 
2910 		if (link_speeds_bitmap & speed)
2911 			*phy_type_high |= BIT_ULL(index);
2912 	}
2913 }
2914 
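/* Illustrative only (not part of this file): a minimal sketch of turning a
 * set of requested link speeds into PHY type bits for a set-PHY-config
 * request. The chosen speeds are placeholders.
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *	u64 phy_type_low = 0, phy_type_high = 0;
 *	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high, speeds);
 *	cfg.phy_type_low = cpu_to_le64(phy_type_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_type_high);
 */
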
2915 /**
2916  * ice_aq_set_phy_cfg
2917  * @hw: pointer to the HW struct
2918  * @pi: port info structure of the interested logical port
2919  * @cfg: structure with PHY configuration data to be set
2920  * @cd: pointer to command details structure or NULL
2921  *
2922  * Set the various PHY configuration parameters supported on the Port.
2923  * One or more of the Set PHY config parameters may be ignored in an MFP
2924  * mode as the PF may not have the privilege to set some of the PHY Config
2925  * parameters. This status will be indicated by the command response (0x0601).
2926  */
2927 int
2928 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2929 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2930 {
2931 	struct ice_aq_desc desc;
2932 	int status;
2933 
2934 	if (!cfg)
2935 		return -EINVAL;
2936 
2937 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2938 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2939 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2940 			  cfg->caps);
2941 
2942 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2943 	}
2944 
2945 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2946 	desc.params.set_phy.lport_num = pi->lport;
2947 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2948 
2949 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2950 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
2951 		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2952 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
2953 		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2954 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
2955 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
2956 		  cfg->low_power_ctrl_an);
2957 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
2958 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
2959 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
2960 		  cfg->link_fec_opt);
2961 
2962 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2963 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2964 		status = 0;
2965 
2966 	if (!status)
2967 		pi->phy.curr_user_phy_cfg = *cfg;
2968 
2969 	return status;
2970 }
2971 
2972 /**
2973  * ice_update_link_info - update status of the HW network link
2974  * @pi: port info structure of the interested logical port
2975  */
2976 int ice_update_link_info(struct ice_port_info *pi)
2977 {
2978 	struct ice_link_status *li;
2979 	int status;
2980 
2981 	if (!pi)
2982 		return -EINVAL;
2983 
2984 	li = &pi->phy.link_info;
2985 
2986 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2987 	if (status)
2988 		return status;
2989 
2990 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2991 		struct ice_aqc_get_phy_caps_data *pcaps;
2992 		struct ice_hw *hw;
2993 
2994 		hw = pi->hw;
2995 		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2996 				     GFP_KERNEL);
2997 		if (!pcaps)
2998 			return -ENOMEM;
2999 
3000 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3001 					     pcaps, NULL);
3002 
3003 		devm_kfree(ice_hw_to_dev(hw), pcaps);
3004 	}
3005 
3006 	return status;
3007 }
3008 
3009 /**
3010  * ice_cache_phy_user_req
3011  * @pi: port information structure
3012  * @cache_data: PHY logging data
3013  * @cache_mode: PHY logging mode
3014  *
3015  * Log the user request on (FC, FEC, SPEED) for later use.
3016  */
3017 static void
3018 ice_cache_phy_user_req(struct ice_port_info *pi,
3019 		       struct ice_phy_cache_mode_data cache_data,
3020 		       enum ice_phy_cache_mode cache_mode)
3021 {
3022 	if (!pi)
3023 		return;
3024 
3025 	switch (cache_mode) {
3026 	case ICE_FC_MODE:
3027 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3028 		break;
3029 	case ICE_SPEED_MODE:
3030 		pi->phy.curr_user_speed_req =
3031 			cache_data.data.curr_user_speed_req;
3032 		break;
3033 	case ICE_FEC_MODE:
3034 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3035 		break;
3036 	default:
3037 		break;
3038 	}
3039 }
3040 
3041 /**
3042  * ice_caps_to_fc_mode
3043  * @caps: PHY capabilities
3044  *
3045  * Convert PHY FC capabilities to ice FC mode
3046  */
3047 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3048 {
3049 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3050 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3051 		return ICE_FC_FULL;
3052 
3053 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3054 		return ICE_FC_TX_PAUSE;
3055 
3056 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3057 		return ICE_FC_RX_PAUSE;
3058 
3059 	return ICE_FC_NONE;
3060 }
3061 
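/* Illustrative only: with both ICE_AQC_PHY_EN_TX_LINK_PAUSE and
 * ICE_AQC_PHY_EN_RX_LINK_PAUSE set in caps, ice_caps_to_fc_mode() returns
 * ICE_FC_FULL; with only the Rx pause bit set it returns ICE_FC_RX_PAUSE.
 * A hypothetical caller that has read PHY capabilities into pcaps might do:
 *
 *	enum ice_fc_mode fc_mode = ice_caps_to_fc_mode(pcaps->caps);
 */
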
3062 /**
3063  * ice_caps_to_fec_mode
3064  * @caps: PHY capabilities
3065  * @fec_options: Link FEC options
3066  *
3067  * Convert PHY FEC capabilities to ice FEC mode
3068  */
3069 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3070 {
3071 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3072 		return ICE_FEC_AUTO;
3073 
3074 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3075 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3076 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3077 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3078 		return ICE_FEC_BASER;
3079 
3080 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3081 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3082 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3083 		return ICE_FEC_RS;
3084 
3085 	return ICE_FEC_NONE;
3086 }
3087 
3088 /**
3089  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3090  * @pi: port information structure
3091  * @cfg: PHY configuration data to set FC mode
3092  * @req_mode: FC mode to configure
3093  */
3094 int
3095 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3096 	       enum ice_fc_mode req_mode)
3097 {
3098 	struct ice_phy_cache_mode_data cache_data;
3099 	u8 pause_mask = 0x0;
3100 
3101 	if (!pi || !cfg)
3102 		return -EINVAL;
3103 
3104 	switch (req_mode) {
3105 	case ICE_FC_FULL:
3106 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3107 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3108 		break;
3109 	case ICE_FC_RX_PAUSE:
3110 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3111 		break;
3112 	case ICE_FC_TX_PAUSE:
3113 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3114 		break;
3115 	default:
3116 		break;
3117 	}
3118 
3119 	/* clear the old pause settings */
3120 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3121 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3122 
3123 	/* set the new capabilities */
3124 	cfg->caps |= pause_mask;
3125 
3126 	/* Cache user FC request */
3127 	cache_data.data.curr_user_fc_req = req_mode;
3128 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3129 
3130 	return 0;
3131 }
3132 
3133 /**
3134  * ice_set_fc
3135  * @pi: port information structure
3136  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3137  * @ena_auto_link_update: enable automatic link update
3138  *
3139  * Set the requested flow control mode.
3140  */
3141 int
3142 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3143 {
3144 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3145 	struct ice_aqc_get_phy_caps_data *pcaps;
3146 	struct ice_hw *hw;
3147 	int status;
3148 
3149 	if (!pi || !aq_failures)
3150 		return -EINVAL;
3151 
3152 	*aq_failures = 0;
3153 	hw = pi->hw;
3154 
3155 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3156 	if (!pcaps)
3157 		return -ENOMEM;
3158 
3159 	/* Get the current PHY config */
3160 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3161 				     pcaps, NULL);
3162 	if (status) {
3163 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3164 		goto out;
3165 	}
3166 
3167 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3168 
3169 	/* Configure the set PHY data */
3170 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3171 	if (status)
3172 		goto out;
3173 
3174 	/* If the capabilities have changed, then set the new config */
3175 	if (cfg.caps != pcaps->caps) {
3176 		int retry_count, retry_max = 10;
3177 
3178 		/* Auto restart link so settings take effect */
3179 		if (ena_auto_link_update)
3180 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3181 
3182 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3183 		if (status) {
3184 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3185 			goto out;
3186 		}
3187 
3188 		/* Update the link info.
3189 		 * It sometimes takes a long time for the link to
3190 		 * come back from the atomic reset, so retry the
3191 		 * update a few times with a delay in between.
3192 		 */
3193 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3194 			status = ice_update_link_info(pi);
3195 
3196 			if (!status)
3197 				break;
3198 
3199 			mdelay(100);
3200 		}
3201 
3202 		if (status)
3203 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3204 	}
3205 
3206 out:
3207 	devm_kfree(ice_hw_to_dev(hw), pcaps);
3208 	return status;
3209 }
3210 
3211 /**
3212  * ice_phy_caps_equals_cfg
3213  * @phy_caps: PHY capabilities
3214  * @phy_cfg: PHY configuration
3215  *
3216  * Helper function to determine if the PHY capabilities match the PHY
3217  * configuration.
3218  */
3219 bool
3220 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3221 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3222 {
3223 	u8 caps_mask, cfg_mask;
3224 
3225 	if (!phy_caps || !phy_cfg)
3226 		return false;
3227 
3228 	/* These bits are not common between capabilities and configuration.
3229 	 * Do not use them to determine equality.
3230 	 */
3231 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3232 					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
3233 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3234 
3235 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3236 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3237 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3238 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3239 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3240 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3241 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3242 		return false;
3243 
3244 	return true;
3245 }
3246 
3247 /**
3248  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3249  * @pi: port information structure
3250  * @caps: PHY ability structure to copy data from
3251  * @cfg: PHY configuration structure to copy data to
3252  *
3253  * Helper function to copy AQC PHY get ability data to PHY set configuration
3254  * data structure
3255  */
3256 void
3257 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3258 			 struct ice_aqc_get_phy_caps_data *caps,
3259 			 struct ice_aqc_set_phy_cfg_data *cfg)
3260 {
3261 	if (!pi || !caps || !cfg)
3262 		return;
3263 
3264 	memset(cfg, 0, sizeof(*cfg));
3265 	cfg->phy_type_low = caps->phy_type_low;
3266 	cfg->phy_type_high = caps->phy_type_high;
3267 	cfg->caps = caps->caps;
3268 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3269 	cfg->eee_cap = caps->eee_cap;
3270 	cfg->eeer_value = caps->eeer_value;
3271 	cfg->link_fec_opt = caps->link_fec_options;
3272 	cfg->module_compliance_enforcement =
3273 		caps->module_compliance_enforcement;
3274 }
3275 
3276 /**
3277  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3278  * @pi: port information structure
3279  * @cfg: PHY configuration data to set FEC mode
3280  * @fec: FEC mode to configure
3281  */
3282 int
3283 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3284 		enum ice_fec_mode fec)
3285 {
3286 	struct ice_aqc_get_phy_caps_data *pcaps;
3287 	struct ice_hw *hw;
3288 	int status;
3289 
3290 	if (!pi || !cfg)
3291 		return -EINVAL;
3292 
3293 	hw = pi->hw;
3294 
3295 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3296 	if (!pcaps)
3297 		return -ENOMEM;
3298 
3299 	status = ice_aq_get_phy_caps(pi, false,
3300 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3301 				      ICE_AQC_REPORT_DFLT_CFG :
3302 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3303 	if (status)
3304 		goto out;
3305 
3306 	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3307 	cfg->link_fec_opt = pcaps->link_fec_options;
3308 
3309 	switch (fec) {
3310 	case ICE_FEC_BASER:
3311 		/* Clear the RS bits, AND in the BASE-R ability
3312 		 * bits, and OR in the request bits.
3313 		 */
3314 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3315 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3316 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3317 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3318 		break;
3319 	case ICE_FEC_RS:
3320 		/* Clear the BASE-R bits, AND in the RS ability
3321 		 * bits, and OR in the request bits.
3322 		 */
3323 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3324 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3325 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3326 		break;
3327 	case ICE_FEC_NONE:
3328 		/* Clear all FEC option bits. */
3329 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3330 		break;
3331 	case ICE_FEC_AUTO:
3332 		/* Mask caps to the valid capability bits and OR in all supported FEC options. */
3333 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3334 		cfg->link_fec_opt |= pcaps->link_fec_options;
3335 		break;
3336 	default:
3337 		status = -EINVAL;
3338 		break;
3339 	}
3340 
3341 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3342 	    !ice_fw_supports_report_dflt_cfg(hw)) {
3343 		struct ice_link_default_override_tlv tlv;
3344 
3345 		status = ice_get_link_default_override(&tlv, pi);
3346 		if (status)
3347 			goto out;
3348 
3349 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3350 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3351 			cfg->link_fec_opt = tlv.fec_options;
3352 	}
3353 
3354 out:
3355 	kfree(pcaps);
3356 
3357 	return status;
3358 }
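
/* Illustrative usage sketch (editorial addition): forcing RS-FEC on a port,
 * following the same get-caps / copy / set-cfg pattern used for other PHY
 * changes. Assumes pcaps already holds the reported PHY abilities.
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	err = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (!err)
 *		err = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */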
3359 
3360 /**
3361  * ice_get_link_status - get status of the HW network link
3362  * @pi: port information structure
3363  * @link_up: pointer to bool (true/false = linkup/linkdown)
3364  *
3365  * The variable link_up is true if the link is up, false if the link is
3366  * down. The value of link_up is invalid if the return status is non-zero.
3367  * As a result of this call, link status reporting becomes enabled.
3368  */
3369 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3370 {
3371 	struct ice_phy_info *phy_info;
3372 	int status = 0;
3373 
3374 	if (!pi || !link_up)
3375 		return -EINVAL;
3376 
3377 	phy_info = &pi->phy;
3378 
3379 	if (phy_info->get_link_info) {
3380 		status = ice_update_link_info(pi);
3381 
3382 		if (status)
3383 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3384 				  status);
3385 	}
3386 
3387 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3388 
3389 	return status;
3390 }
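
/* Illustrative usage sketch (editorial addition): poll the link and, if it
 * is down, restart auto-negotiation with the helper below. Assumes pi is a
 * valid, initialized port.
 *
 *	bool link_up;
 *	int err;
 *
 *	err = ice_get_link_status(pi, &link_up);
 *	if (!err && !link_up)
 *		err = ice_aq_set_link_restart_an(pi, true, NULL);
 */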
3391 
3392 /**
3393  * ice_aq_set_link_restart_an
3394  * @pi: pointer to the port information structure
3395  * @ena_link: if true: enable link, if false: disable link
3396  * @cd: pointer to command details structure or NULL
3397  *
3398  * Sets up the link and restarts the Auto-Negotiation over the link.
3399  */
3400 int
3401 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3402 			   struct ice_sq_cd *cd)
3403 {
3404 	struct ice_aqc_restart_an *cmd;
3405 	struct ice_aq_desc desc;
3406 
3407 	cmd = &desc.params.restart_an;
3408 
3409 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3410 
3411 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3412 	cmd->lport_num = pi->lport;
3413 	if (ena_link)
3414 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3415 	else
3416 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3417 
3418 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3419 }
3420 
3421 /**
3422  * ice_aq_set_event_mask
3423  * @hw: pointer to the HW struct
3424  * @port_num: port number of the physical function
3425  * @mask: event mask to be set
3426  * @cd: pointer to command details structure or NULL
3427  *
3428  * Set event mask (0x0613)
3429  */
3430 int
3431 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3432 		      struct ice_sq_cd *cd)
3433 {
3434 	struct ice_aqc_set_event_mask *cmd;
3435 	struct ice_aq_desc desc;
3436 
3437 	cmd = &desc.params.set_event_mask;
3438 
3439 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3440 
3441 	cmd->lport_num = port_num;
3442 
3443 	cmd->event_mask = cpu_to_le16(mask);
3444 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3445 }
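
/* Illustrative usage sketch (editorial addition): a set bit in the mask
 * suppresses the corresponding link event, so the sketch below leaves only
 * link up/down, media-not-available and module-qualification-failure events
 * unmasked. The ICE_AQ_LINK_EVENT_* constants are assumed from
 * ice_adminq_cmd.h.
 *
 *	u16 mask = ~(u16)(ICE_AQ_LINK_EVENT_UPDOWN |
 *			  ICE_AQ_LINK_EVENT_MEDIA_NA |
 *			  ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL);
 *
 *	err = ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL);
 */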
3446 
3447 /**
3448  * ice_aq_set_mac_loopback
3449  * @hw: pointer to the HW struct
3450  * @ena_lpbk: Enable or Disable loopback
3451  * @cd: pointer to command details structure or NULL
3452  *
3453  * Enable/disable loopback on a given port
3454  */
3455 int
3456 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3457 {
3458 	struct ice_aqc_set_mac_lb *cmd;
3459 	struct ice_aq_desc desc;
3460 
3461 	cmd = &desc.params.set_mac_lb;
3462 
3463 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3464 	if (ena_lpbk)
3465 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3466 
3467 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3468 }
3469 
3470 /**
3471  * ice_aq_set_port_id_led
3472  * @pi: pointer to the port information
3473  * @is_orig_mode: is this LED set to original mode (by the net-list)
3474  * @cd: pointer to command details structure or NULL
3475  *
3476  * Set LED value for the given port (0x06e9)
3477  */
3478 int
3479 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3480 		       struct ice_sq_cd *cd)
3481 {
3482 	struct ice_aqc_set_port_id_led *cmd;
3483 	struct ice_hw *hw = pi->hw;
3484 	struct ice_aq_desc desc;
3485 
3486 	cmd = &desc.params.set_port_id_led;
3487 
3488 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3489 
3490 	if (is_orig_mode)
3491 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3492 	else
3493 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3494 
3495 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3496 }
3497 
3498 /**
3499  * ice_aq_sff_eeprom
3500  * @hw: pointer to the HW struct
3501  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3502  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3503  * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be zero.
3504  * @page: QSFP page
3505  * @set_page: set or ignore the page
3506  * @data: pointer to data buffer to be read/written to the I2C device.
3507  * @length: 1-16 for read, 1 for write.
3508  * @write: false for read, true for write.
3509  * @cd: pointer to command details structure or NULL
3510  *
3511  * Read/Write SFF EEPROM (0x06EE)
3512  */
3513 int
3514 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3515 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3516 		  bool write, struct ice_sq_cd *cd)
3517 {
3518 	struct ice_aqc_sff_eeprom *cmd;
3519 	struct ice_aq_desc desc;
3520 	int status;
3521 
3522 	if (!data || (mem_addr & 0xff00))
3523 		return -EINVAL;
3524 
3525 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3526 	cmd = &desc.params.read_write_sff_param;
3527 	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3528 	cmd->lport_num = (u8)(lport & 0xff);
3529 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3530 	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3531 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3532 					((set_page <<
3533 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3534 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3535 	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3536 	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3537 	if (write)
3538 		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3539 
3540 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3541 	return status;
3542 }
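
/* Illustrative usage sketch (editorial addition): read the single SFF
 * identifier byte at offset 0x00 from the module EEPROM at I2C address 0xA0,
 * with the logical-port-valid bit left clear and no page change requested.
 *
 *	u8 sff_id;
 *	int err;
 *
 *	err = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0x00, 0, &sff_id, 1,
 *				false, NULL);
 */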
3543 
3544 /**
3545  * __ice_aq_get_set_rss_lut
3546  * @hw: pointer to the hardware structure
3547  * @params: RSS LUT parameters
3548  * @set: set true to set the table, false to get the table
3549  *
3550  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3551  */
3552 static int
3553 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3554 {
3555 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3556 	struct ice_aqc_get_set_rss_lut *cmd_resp;
3557 	struct ice_aq_desc desc;
3558 	int status;
3559 	u8 *lut;
3560 
3561 	if (!params)
3562 		return -EINVAL;
3563 
3564 	vsi_handle = params->vsi_handle;
3565 	lut = params->lut;
3566 
3567 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3568 		return -EINVAL;
3569 
3570 	lut_size = params->lut_size;
3571 	lut_type = params->lut_type;
3572 	glob_lut_idx = params->global_lut_id;
3573 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3574 
3575 	cmd_resp = &desc.params.get_set_rss_lut;
3576 
3577 	if (set) {
3578 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3579 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3580 	} else {
3581 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3582 	}
3583 
3584 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3585 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3586 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3587 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3588 
3589 	switch (lut_type) {
3590 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3591 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3592 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3593 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3594 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3595 		break;
3596 	default:
3597 		status = -EINVAL;
3598 		goto ice_aq_get_set_rss_lut_exit;
3599 	}
3600 
3601 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3602 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3603 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3604 
3605 		if (!set)
3606 			goto ice_aq_get_set_rss_lut_send;
3607 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3608 		if (!set)
3609 			goto ice_aq_get_set_rss_lut_send;
3610 	} else {
3611 		goto ice_aq_get_set_rss_lut_send;
3612 	}
3613 
3614 	/* LUT size is only valid for Global and PF table types */
3615 	switch (lut_size) {
3616 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3617 		break;
3618 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3619 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3620 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3621 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3622 		break;
3623 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3624 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3625 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3626 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3627 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3628 			break;
3629 		}
3630 		fallthrough;
3631 	default:
3632 		status = -EINVAL;
3633 		goto ice_aq_get_set_rss_lut_exit;
3634 	}
3635 
3636 ice_aq_get_set_rss_lut_send:
3637 	cmd_resp->flags = cpu_to_le16(flags);
3638 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3639 
3640 ice_aq_get_set_rss_lut_exit:
3641 	return status;
3642 }
3643 
3644 /**
3645  * ice_aq_get_rss_lut
3646  * @hw: pointer to the hardware structure
3647  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3648  *
3649  * get the RSS lookup table, PF or VSI type
3650  */
3651 int
3652 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3653 {
3654 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
3655 }
3656 
3657 /**
3658  * ice_aq_set_rss_lut
3659  * @hw: pointer to the hardware structure
3660  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3661  *
3662  * set the RSS lookup table, PF or VSI type
3663  */
3664 int
3665 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3666 {
3667 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
3668 }
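
/* Illustrative usage sketch (editorial addition): fetch a VSI-type LUT into
 * a caller-owned buffer, assuming vsi_handle is a valid software VSI handle.
 * The 64-byte size is the typical per-VSI LUT size; the same parameter
 * structure feeds ice_aq_set_rss_lut() when writing the table back.
 *
 *	u8 lut[64];
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut = lut,
 *		.lut_size = sizeof(lut),
 *		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *	};
 *
 *	err = ice_aq_get_rss_lut(hw, &params);
 */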
3669 
3670 /**
3671  * __ice_aq_get_set_rss_key
3672  * @hw: pointer to the HW struct
3673  * @vsi_id: VSI FW index
3674  * @key: pointer to key info struct
3675  * @set: set true to set the key, false to get the key
3676  *
3677  * get (0x0B04) or set (0x0B02) the RSS key per VSI
3678  */
3679 static int
3680 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3681 			 struct ice_aqc_get_set_rss_keys *key, bool set)
3682 {
3683 	struct ice_aqc_get_set_rss_key *cmd_resp;
3684 	u16 key_size = sizeof(*key);
3685 	struct ice_aq_desc desc;
3686 
3687 	cmd_resp = &desc.params.get_set_rss_key;
3688 
3689 	if (set) {
3690 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3691 		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3692 	} else {
3693 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3694 	}
3695 
3696 	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3697 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3698 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3699 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3700 
3701 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3702 }
3703 
3704 /**
3705  * ice_aq_get_rss_key
3706  * @hw: pointer to the HW struct
3707  * @vsi_handle: software VSI handle
3708  * @key: pointer to key info struct
3709  *
3710  * get the RSS key per VSI
3711  */
3712 int
3713 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3714 		   struct ice_aqc_get_set_rss_keys *key)
3715 {
3716 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3717 		return -EINVAL;
3718 
3719 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3720 					key, false);
3721 }
3722 
3723 /**
3724  * ice_aq_set_rss_key
3725  * @hw: pointer to the HW struct
3726  * @vsi_handle: software VSI handle
3727  * @keys: pointer to key info struct
3728  *
3729  * set the RSS key per VSI
3730  */
3731 int
3732 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3733 		   struct ice_aqc_get_set_rss_keys *keys)
3734 {
3735 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3736 		return -EINVAL;
3737 
3738 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3739 					keys, true);
3740 }
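
/* Illustrative usage sketch (editorial addition): program a random standard
 * RSS key for a VSI. netdev_rss_key_fill() is the generic kernel helper;
 * the extended hash key is left zeroed.
 *
 *	struct ice_aqc_get_set_rss_keys keys = {};
 *	int err;
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	err = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */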
3741 
3742 /**
3743  * ice_aq_add_lan_txq
3744  * @hw: pointer to the hardware structure
3745  * @num_qgrps: Number of added queue groups
3746  * @qg_list: list of queue groups to be added
3747  * @buf_size: size of buffer for indirect command
3748  * @cd: pointer to command details structure or NULL
3749  *
3750  * Add Tx LAN queue (0x0C30)
3751  *
3752  * NOTE:
3753  * Prior to calling add Tx LAN queue, initialize the following as part of
3754  * the Tx queue context: the Completion queue ID (if the queue uses a
3755  * Completion queue), the Quanta profile, the Cache profile and the Packet
3756  * shaper profile.
3757  *
3758  * After the add Tx LAN queue AQ command completes, interrupts should be
3759  * associated with the specific queues. Associating a Tx queue with a
3760  * Doorbell queue is not part of the add LAN Tx queue
3761  * flow.
3762  */
3763 static int
3764 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3765 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3766 		   struct ice_sq_cd *cd)
3767 {
3768 	struct ice_aqc_add_tx_qgrp *list;
3769 	struct ice_aqc_add_txqs *cmd;
3770 	struct ice_aq_desc desc;
3771 	u16 i, sum_size = 0;
3772 
3773 	cmd = &desc.params.add_txqs;
3774 
3775 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3776 
3777 	if (!qg_list)
3778 		return -EINVAL;
3779 
3780 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3781 		return -EINVAL;
3782 
3783 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
3784 		sum_size += struct_size(list, txqs, list->num_txqs);
3785 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3786 						      list->num_txqs);
3787 	}
3788 
3789 	if (buf_size != sum_size)
3790 		return -EINVAL;
3791 
3792 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3793 
3794 	cmd->num_qgrps = num_qgrps;
3795 
3796 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3797 }
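
/* Illustrative sizing sketch (editorial addition): because buf_size must
 * equal the sum of struct_size() over every queue group, callers size the
 * buffer with the flexible txqs[] array up front. The sketch assumes a
 * single group containing one queue, as the Tx setup path does before
 * handing the buffer to ice_ena_vsi_txq().
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 size = struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf = kzalloc(size, GFP_KERNEL);
 *	if (!qg_buf)
 *		return -ENOMEM;
 *	qg_buf->num_txqs = 1;
 *	... fill qg_buf->txqs[0] (txq_id and packed queue context) ...
 */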
3798 
3799 /**
3800  * ice_aq_dis_lan_txq
3801  * @hw: pointer to the hardware structure
3802  * @num_qgrps: number of groups in the list
3803  * @qg_list: the list of groups to disable
3804  * @buf_size: the total size of the qg_list buffer in bytes
3805  * @rst_src: if called due to reset, specifies the reset source
3806  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3807  * @cd: pointer to command details structure or NULL
3808  *
3809  * Disable LAN Tx queue (0x0C31)
3810  */
3811 static int
3812 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3813 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3814 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
3815 		   struct ice_sq_cd *cd)
3816 {
3817 	struct ice_aqc_dis_txq_item *item;
3818 	struct ice_aqc_dis_txqs *cmd;
3819 	struct ice_aq_desc desc;
3820 	u16 i, sz = 0;
3821 	int status;
3822 
3823 	cmd = &desc.params.dis_txqs;
3824 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3825 
3826 	/* qg_list can be NULL only in VM/VF reset flow */
3827 	if (!qg_list && !rst_src)
3828 		return -EINVAL;
3829 
3830 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3831 		return -EINVAL;
3832 
3833 	cmd->num_entries = num_qgrps;
3834 
3835 	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3836 					    ICE_AQC_Q_DIS_TIMEOUT_M);
3837 
3838 	switch (rst_src) {
3839 	case ICE_VM_RESET:
3840 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3841 		cmd->vmvf_and_timeout |=
3842 			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3843 		break;
3844 	case ICE_VF_RESET:
3845 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3846 		/* In this case, FW expects vmvf_num to be absolute VF ID */
3847 		cmd->vmvf_and_timeout |=
3848 			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3849 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
3850 		break;
3851 	case ICE_NO_RESET:
3852 	default:
3853 		break;
3854 	}
3855 
3856 	/* flush pipe on time out */
3857 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3858 	/* If no queue group info, we are in a reset flow. Issue the AQ */
3859 	if (!qg_list)
3860 		goto do_aq;
3861 
3862 	/* set RD bit to indicate that command buffer is provided by the driver
3863 	 * and it needs to be read by the firmware
3864 	 */
3865 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3866 
3867 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
3868 		u16 item_size = struct_size(item, q_id, item->num_qs);
3869 
3870 		/* If the num of queues is even, add 2 bytes of padding */
3871 		if ((item->num_qs % 2) == 0)
3872 			item_size += 2;
3873 
3874 		sz += item_size;
3875 
3876 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3877 	}
3878 
3879 	if (buf_size != sz)
3880 		return -EINVAL;
3881 
3882 do_aq:
3883 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3884 	if (status) {
3885 		if (!qg_list)
3886 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3887 				  vmvf_num, hw->adminq.sq_last_status);
3888 		else
3889 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3890 				  le16_to_cpu(qg_list[0].q_id[0]),
3891 				  hw->adminq.sq_last_status);
3892 	}
3893 	return status;
3894 }
3895 
3896 /**
3897  * ice_aq_add_rdma_qsets
3898  * @hw: pointer to the hardware structure
3899  * @num_qset_grps: Number of RDMA Qset groups
3900  * @qset_list: list of Qset groups to be added
3901  * @buf_size: size of buffer for indirect command
3902  * @cd: pointer to command details structure or NULL
3903  *
3904  * Add Tx RDMA Qsets (0x0C33)
3905  */
3906 static int
3907 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3908 		      struct ice_aqc_add_rdma_qset_data *qset_list,
3909 		      u16 buf_size, struct ice_sq_cd *cd)
3910 {
3911 	struct ice_aqc_add_rdma_qset_data *list;
3912 	struct ice_aqc_add_rdma_qset *cmd;
3913 	struct ice_aq_desc desc;
3914 	u16 i, sum_size = 0;
3915 
3916 	cmd = &desc.params.add_rdma_qset;
3917 
3918 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3919 
3920 	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3921 		return -EINVAL;
3922 
3923 	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3924 		u16 num_qsets = le16_to_cpu(list->num_qsets);
3925 
3926 		sum_size += struct_size(list, rdma_qsets, num_qsets);
3927 		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3928 							     num_qsets);
3929 	}
3930 
3931 	if (buf_size != sum_size)
3932 		return -EINVAL;
3933 
3934 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3935 
3936 	cmd->num_qset_grps = num_qset_grps;
3937 
3938 	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
3939 }
3940 
3941 /* End of FW Admin Queue command wrappers */
3942 
3943 /**
3944  * ice_write_byte - write a byte to a packed context structure
3945  * @src_ctx:  the context structure to read from
3946  * @dest_ctx: the context to be written to
3947  * @ce_info:  a description of the struct to be filled
3948  */
3949 static void
3950 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3951 {
3952 	u8 src_byte, dest_byte, mask;
3953 	u8 *from, *dest;
3954 	u16 shift_width;
3955 
3956 	/* copy from the next struct field */
3957 	from = src_ctx + ce_info->offset;
3958 
3959 	/* prepare the bits and mask */
3960 	shift_width = ce_info->lsb % 8;
3961 	mask = (u8)(BIT(ce_info->width) - 1);
3962 
3963 	src_byte = *from;
3964 	src_byte &= mask;
3965 
3966 	/* shift to correct alignment */
3967 	mask <<= shift_width;
3968 	src_byte <<= shift_width;
3969 
3970 	/* get the current bits from the target bit string */
3971 	dest = dest_ctx + (ce_info->lsb / 8);
3972 
3973 	memcpy(&dest_byte, dest, sizeof(dest_byte));
3974 
3975 	dest_byte &= ~mask;	/* get the bits not changing */
3976 	dest_byte |= src_byte;	/* add in the new bits */
3977 
3978 	/* put it all back */
3979 	memcpy(dest, &dest_byte, sizeof(dest_byte));
3980 }
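
/* Worked example (editorial addition): for a field with lsb = 3 and
 * width = 2, mask starts as 0x03 and shift_width is 3. A source value of
 * 0x2 stays 0x2 after masking, becomes 0x10 after the shift, and the mask
 * becomes 0x18; only bits 4:3 of the destination byte at offset lsb / 8 are
 * replaced, while all other bits are preserved.
 */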
3981 
3982 /**
3983  * ice_write_word - write a word to a packed context structure
3984  * @src_ctx:  the context structure to read from
3985  * @dest_ctx: the context to be written to
3986  * @ce_info:  a description of the struct to be filled
3987  */
3988 static void
3989 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3990 {
3991 	u16 src_word, mask;
3992 	__le16 dest_word;
3993 	u8 *from, *dest;
3994 	u16 shift_width;
3995 
3996 	/* copy from the next struct field */
3997 	from = src_ctx + ce_info->offset;
3998 
3999 	/* prepare the bits and mask */
4000 	shift_width = ce_info->lsb % 8;
4001 	mask = BIT(ce_info->width) - 1;
4002 
4003 	/* don't swizzle the bits until after the mask because the mask bits
4004 	 * will be in a different bit position on big endian machines
4005 	 */
4006 	src_word = *(u16 *)from;
4007 	src_word &= mask;
4008 
4009 	/* shift to correct alignment */
4010 	mask <<= shift_width;
4011 	src_word <<= shift_width;
4012 
4013 	/* get the current bits from the target bit string */
4014 	dest = dest_ctx + (ce_info->lsb / 8);
4015 
4016 	memcpy(&dest_word, dest, sizeof(dest_word));
4017 
4018 	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
4019 	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
4020 
4021 	/* put it all back */
4022 	memcpy(dest, &dest_word, sizeof(dest_word));
4023 }
4024 
4025 /**
4026  * ice_write_dword - write a dword to a packed context structure
4027  * @src_ctx:  the context structure to read from
4028  * @dest_ctx: the context to be written to
4029  * @ce_info:  a description of the struct to be filled
4030  */
4031 static void
4032 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4033 {
4034 	u32 src_dword, mask;
4035 	__le32 dest_dword;
4036 	u8 *from, *dest;
4037 	u16 shift_width;
4038 
4039 	/* copy from the next struct field */
4040 	from = src_ctx + ce_info->offset;
4041 
4042 	/* prepare the bits and mask */
4043 	shift_width = ce_info->lsb % 8;
4044 
4045 	/* if the field width is exactly 32 on an x86 machine, then the shift
4046 	 * operation will not work because the SHL instruction's shift count is
4047 	 * masked to 5 bits, so the shift would do nothing
4048 	 */
4049 	if (ce_info->width < 32)
4050 		mask = BIT(ce_info->width) - 1;
4051 	else
4052 		mask = (u32)~0;
4053 
4054 	/* don't swizzle the bits until after the mask because the mask bits
4055 	 * will be in a different bit position on big endian machines
4056 	 */
4057 	src_dword = *(u32 *)from;
4058 	src_dword &= mask;
4059 
4060 	/* shift to correct alignment */
4061 	mask <<= shift_width;
4062 	src_dword <<= shift_width;
4063 
4064 	/* get the current bits from the target bit string */
4065 	dest = dest_ctx + (ce_info->lsb / 8);
4066 
4067 	memcpy(&dest_dword, dest, sizeof(dest_dword));
4068 
4069 	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
4070 	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
4071 
4072 	/* put it all back */
4073 	memcpy(dest, &dest_dword, sizeof(dest_dword));
4074 }
4075 
4076 /**
4077  * ice_write_qword - write a qword to a packed context structure
4078  * @src_ctx:  the context structure to read from
4079  * @dest_ctx: the context to be written to
4080  * @ce_info:  a description of the struct to be filled
4081  */
4082 static void
4083 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4084 {
4085 	u64 src_qword, mask;
4086 	__le64 dest_qword;
4087 	u8 *from, *dest;
4088 	u16 shift_width;
4089 
4090 	/* copy from the next struct field */
4091 	from = src_ctx + ce_info->offset;
4092 
4093 	/* prepare the bits and mask */
4094 	shift_width = ce_info->lsb % 8;
4095 
4096 	/* if the field width is exactly 64 on an x86 machine, then the shift
4097 	 * operation will not work because the SHL instruction's shift count is
4098 	 * masked to 6 bits, so the shift would do nothing
4099 	 */
4100 	if (ce_info->width < 64)
4101 		mask = BIT_ULL(ce_info->width) - 1;
4102 	else
4103 		mask = (u64)~0;
4104 
4105 	/* don't swizzle the bits until after the mask because the mask bits
4106 	 * will be in a different bit position on big endian machines
4107 	 */
4108 	src_qword = *(u64 *)from;
4109 	src_qword &= mask;
4110 
4111 	/* shift to correct alignment */
4112 	mask <<= shift_width;
4113 	src_qword <<= shift_width;
4114 
4115 	/* get the current bits from the target bit string */
4116 	dest = dest_ctx + (ce_info->lsb / 8);
4117 
4118 	memcpy(&dest_qword, dest, sizeof(dest_qword));
4119 
4120 	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
4121 	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
4122 
4123 	/* put it all back */
4124 	memcpy(dest, &dest_qword, sizeof(dest_qword));
4125 }
4126 
4127 /**
4128  * ice_set_ctx - set context bits in packed structure
4129  * @hw: pointer to the hardware structure
4130  * @src_ctx:  pointer to a generic non-packed context structure
4131  * @dest_ctx: pointer to memory for the packed structure
4132  * @ce_info:  a description of the structure to be transformed
4133  */
4134 int
4135 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4136 	    const struct ice_ctx_ele *ce_info)
4137 {
4138 	int f;
4139 
4140 	for (f = 0; ce_info[f].width; f++) {
4141 		/* We have to deal with each element of the FW response
4142 		 * using the correct size so that we are correct regardless
4143 		 * of the endianness of the machine.
4144 		 */
4145 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4146 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4147 				  f, ce_info[f].width, ce_info[f].size_of);
4148 			continue;
4149 		}
4150 		switch (ce_info[f].size_of) {
4151 		case sizeof(u8):
4152 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4153 			break;
4154 		case sizeof(u16):
4155 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4156 			break;
4157 		case sizeof(u32):
4158 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4159 			break;
4160 		case sizeof(u64):
4161 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4162 			break;
4163 		default:
4164 			return -EINVAL;
4165 		}
4166 	}
4167 
4168 	return 0;
4169 }
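
/* Illustrative usage sketch (editorial addition): pack a Tx LAN queue
 * context into the buffer carried by the add-Tx-queue AQ command, using the
 * ice_tlan_ctx_info descriptor table exported by this file. Assumes qg_buf
 * is the add-Tx-queue buffer and that the context fields are filled the same
 * way the driver's Tx queue setup path fills them.
 *
 *	struct ice_tlan_ctx tlan_ctx = {};
 *
 *	... fill tlan_ctx.base, tlan_ctx.qlen, tlan_ctx.pf_num, ... ...
 *	err = ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
 *			  ice_tlan_ctx_info);
 */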
4170 
4171 /**
4172  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4173  * @hw: pointer to the HW struct
4174  * @vsi_handle: software VSI handle
4175  * @tc: TC number
4176  * @q_handle: software queue handle
4177  */
4178 struct ice_q_ctx *
4179 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4180 {
4181 	struct ice_vsi_ctx *vsi;
4182 	struct ice_q_ctx *q_ctx;
4183 
4184 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4185 	if (!vsi)
4186 		return NULL;
4187 	if (q_handle >= vsi->num_lan_q_entries[tc])
4188 		return NULL;
4189 	if (!vsi->lan_q_ctx[tc])
4190 		return NULL;
4191 	q_ctx = vsi->lan_q_ctx[tc];
4192 	return &q_ctx[q_handle];
4193 }
4194 
4195 /**
4196  * ice_ena_vsi_txq
4197  * @pi: port information structure
4198  * @vsi_handle: software VSI handle
4199  * @tc: TC number
4200  * @q_handle: software queue handle
4201  * @num_qgrps: Number of added queue groups
4202  * @buf: list of queue groups to be added
4203  * @buf_size: size of buffer for indirect command
4204  * @cd: pointer to command details structure or NULL
4205  *
4206  * This function adds one LAN queue
4207  */
4208 int
4209 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4210 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4211 		struct ice_sq_cd *cd)
4212 {
4213 	struct ice_aqc_txsched_elem_data node = { 0 };
4214 	struct ice_sched_node *parent;
4215 	struct ice_q_ctx *q_ctx;
4216 	struct ice_hw *hw;
4217 	int status;
4218 
4219 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4220 		return -EIO;
4221 
4222 	if (num_qgrps > 1 || buf->num_txqs > 1)
4223 		return -ENOSPC;
4224 
4225 	hw = pi->hw;
4226 
4227 	if (!ice_is_vsi_valid(hw, vsi_handle))
4228 		return -EINVAL;
4229 
4230 	mutex_lock(&pi->sched_lock);
4231 
4232 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4233 	if (!q_ctx) {
4234 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4235 			  q_handle);
4236 		status = -EINVAL;
4237 		goto ena_txq_exit;
4238 	}
4239 
4240 	/* find a parent node */
4241 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4242 					    ICE_SCHED_NODE_OWNER_LAN);
4243 	if (!parent) {
4244 		status = -EINVAL;
4245 		goto ena_txq_exit;
4246 	}
4247 
4248 	buf->parent_teid = parent->info.node_teid;
4249 	node.parent_teid = parent->info.node_teid;
4250 	/* Mark the values in the "generic" section as valid. The default
4251 	 * value in the "generic" section is zero. This means that:
4252 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4253 	 * - Priority 0 among siblings, indicated by Bits 1-3.
4254 	 * - WFQ, indicated by Bit 4.
4255 	 * - Adjustment value 0 is used in the PSM credit update flow, indicated
4256 	 * by Bits 5-6.
4257 	 * - Bit 7 is reserved.
4258 	 * Without setting the generic section as valid in valid_sections, the
4259 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4260 	 */
4261 	buf->txqs[0].info.valid_sections =
4262 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4263 		ICE_AQC_ELEM_VALID_EIR;
4264 	buf->txqs[0].info.generic = 0;
4265 	buf->txqs[0].info.cir_bw.bw_profile_idx =
4266 		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4267 	buf->txqs[0].info.cir_bw.bw_alloc =
4268 		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4269 	buf->txqs[0].info.eir_bw.bw_profile_idx =
4270 		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4271 	buf->txqs[0].info.eir_bw.bw_alloc =
4272 		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4273 
4274 	/* add the LAN queue */
4275 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4276 	if (status) {
4277 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4278 			  le16_to_cpu(buf->txqs[0].txq_id),
4279 			  hw->adminq.sq_last_status);
4280 		goto ena_txq_exit;
4281 	}
4282 
4283 	node.node_teid = buf->txqs[0].q_teid;
4284 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4285 	q_ctx->q_handle = q_handle;
4286 	q_ctx->q_teid = le32_to_cpu(node.node_teid);
4287 
4288 	/* add a leaf node into scheduler tree queue layer */
4289 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4290 	if (!status)
4291 		status = ice_sched_replay_q_bw(pi, q_ctx);
4292 
4293 ena_txq_exit:
4294 	mutex_unlock(&pi->sched_lock);
4295 	return status;
4296 }
4297 
4298 /**
4299  * ice_dis_vsi_txq
4300  * @pi: port information structure
4301  * @vsi_handle: software VSI handle
4302  * @tc: TC number
4303  * @num_queues: number of queues
4304  * @q_handles: pointer to software queue handle array
4305  * @q_ids: pointer to the q_id array
4306  * @q_teids: pointer to queue node teids
4307  * @rst_src: if called due to reset, specifies the reset source
4308  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4309  * @cd: pointer to command details structure or NULL
4310  *
4311  * This function removes queues and their corresponding nodes in SW DB
4312  */
4313 int
4314 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4315 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
4316 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
4317 		struct ice_sq_cd *cd)
4318 {
4319 	struct ice_aqc_dis_txq_item *qg_list;
4320 	struct ice_q_ctx *q_ctx;
4321 	int status = -ENOENT;
4322 	struct ice_hw *hw;
4323 	u16 i, buf_size;
4324 
4325 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4326 		return -EIO;
4327 
4328 	hw = pi->hw;
4329 
4330 	if (!num_queues) {
4331 		/* if the queues are already disabled but the disable queue
4332 		 * command still has to be sent to complete the VF reset, call
4333 		 * ice_aq_dis_lan_txq without any queue information
4334 		 */
4335 		if (rst_src)
4336 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4337 						  vmvf_num, NULL);
4338 		return -EIO;
4339 	}
4340 
4341 	buf_size = struct_size(qg_list, q_id, 1);
4342 	qg_list = kzalloc(buf_size, GFP_KERNEL);
4343 	if (!qg_list)
4344 		return -ENOMEM;
4345 
4346 	mutex_lock(&pi->sched_lock);
4347 
4348 	for (i = 0; i < num_queues; i++) {
4349 		struct ice_sched_node *node;
4350 
4351 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4352 		if (!node)
4353 			continue;
4354 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4355 		if (!q_ctx) {
4356 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4357 				  q_handles[i]);
4358 			continue;
4359 		}
4360 		if (q_ctx->q_handle != q_handles[i]) {
4361 			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4362 				  q_ctx->q_handle, q_handles[i]);
4363 			continue;
4364 		}
4365 		qg_list->parent_teid = node->info.parent_teid;
4366 		qg_list->num_qs = 1;
4367 		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4368 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4369 					    vmvf_num, cd);
4370 
4371 		if (status)
4372 			break;
4373 		ice_free_sched_node(pi, node);
4374 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4375 	}
4376 	mutex_unlock(&pi->sched_lock);
4377 	kfree(qg_list);
4378 	return status;
4379 }
4380 
4381 /**
4382  * ice_cfg_vsi_qs - configure the new/existing VSI queues
4383  * @pi: port information structure
4384  * @vsi_handle: software VSI handle
4385  * @tc_bitmap: TC bitmap
4386  * @maxqs: max queues array per TC
4387  * @owner: LAN or RDMA
4388  *
4389  * This function adds/updates the VSI queues per TC.
4390  */
4391 static int
4392 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4393 	       u16 *maxqs, u8 owner)
4394 {
4395 	int status = 0;
4396 	u8 i;
4397 
4398 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4399 		return -EIO;
4400 
4401 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4402 		return -EINVAL;
4403 
4404 	mutex_lock(&pi->sched_lock);
4405 
4406 	ice_for_each_traffic_class(i) {
4407 		/* configuration is possible only if TC node is present */
4408 		if (!ice_sched_get_tc_node(pi, i))
4409 			continue;
4410 
4411 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4412 					   ice_is_tc_ena(tc_bitmap, i));
4413 		if (status)
4414 			break;
4415 	}
4416 
4417 	mutex_unlock(&pi->sched_lock);
4418 	return status;
4419 }
4420 
4421 /**
4422  * ice_cfg_vsi_lan - configure VSI LAN queues
4423  * @pi: port information structure
4424  * @vsi_handle: software VSI handle
4425  * @tc_bitmap: TC bitmap
4426  * @max_lanqs: max LAN queues array per TC
4427  *
4428  * This function adds/updates the VSI LAN queues per TC.
4429  */
4430 int
4431 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4432 		u16 *max_lanqs)
4433 {
4434 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4435 			      ICE_SCHED_NODE_OWNER_LAN);
4436 }
4437 
4438 /**
4439  * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4440  * @pi: port information structure
4441  * @vsi_handle: software VSI handle
4442  * @tc_bitmap: TC bitmap
4443  * @max_rdmaqs: max RDMA queues array per TC
4444  *
4445  * This function adds/updates the VSI RDMA queues per TC.
4446  */
4447 int
4448 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4449 		 u16 *max_rdmaqs)
4450 {
4451 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
4452 			      ICE_SCHED_NODE_OWNER_RDMA);
4453 }
4454 
4455 /**
4456  * ice_ena_vsi_rdma_qset
4457  * @pi: port information structure
4458  * @vsi_handle: software VSI handle
4459  * @tc: TC number
4460  * @rdma_qset: pointer to RDMA Qset
4461  * @num_qsets: number of RDMA Qsets
4462  * @qset_teid: pointer to Qset node TEIDs
4463  *
4464  * This function adds RDMA Qset
4465  */
4466 int
4467 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4468 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
4469 {
4470 	struct ice_aqc_txsched_elem_data node = { 0 };
4471 	struct ice_aqc_add_rdma_qset_data *buf;
4472 	struct ice_sched_node *parent;
4473 	struct ice_hw *hw;
4474 	u16 i, buf_size;
4475 	int ret;
4476 
4477 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4478 		return -EIO;
4479 	hw = pi->hw;
4480 
4481 	if (!ice_is_vsi_valid(hw, vsi_handle))
4482 		return -EINVAL;
4483 
4484 	buf_size = struct_size(buf, rdma_qsets, num_qsets);
4485 	buf = kzalloc(buf_size, GFP_KERNEL);
4486 	if (!buf)
4487 		return -ENOMEM;
4488 	mutex_lock(&pi->sched_lock);
4489 
4490 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4491 					    ICE_SCHED_NODE_OWNER_RDMA);
4492 	if (!parent) {
4493 		ret = -EINVAL;
4494 		goto rdma_error_exit;
4495 	}
4496 	buf->parent_teid = parent->info.node_teid;
4497 	node.parent_teid = parent->info.node_teid;
4498 
4499 	buf->num_qsets = cpu_to_le16(num_qsets);
4500 	for (i = 0; i < num_qsets; i++) {
4501 		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
4502 		buf->rdma_qsets[i].info.valid_sections =
4503 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4504 			ICE_AQC_ELEM_VALID_EIR;
4505 		buf->rdma_qsets[i].info.generic = 0;
4506 		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
4507 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4508 		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
4509 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4510 		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
4511 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4512 		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
4513 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4514 	}
4515 	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
4516 	if (ret) {
4517 		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
4518 		goto rdma_error_exit;
4519 	}
4520 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4521 	for (i = 0; i < num_qsets; i++) {
4522 		node.node_teid = buf->rdma_qsets[i].qset_teid;
4523 		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
4524 					 &node);
4525 		if (ret)
4526 			break;
4527 		qset_teid[i] = le32_to_cpu(node.node_teid);
4528 	}
4529 rdma_error_exit:
4530 	mutex_unlock(&pi->sched_lock);
4531 	kfree(buf);
4532 	return ret;
4533 }
4534 
4535 /**
4536  * ice_dis_vsi_rdma_qset - free RDMA resources
4537  * @pi: port_info struct
4538  * @count: number of RDMA Qsets to free
4539  * @qset_teid: TEID of Qset node
4540  * @q_id: list of queue IDs being disabled
4541  */
4542 int
4543 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4544 		      u16 *q_id)
4545 {
4546 	struct ice_aqc_dis_txq_item *qg_list;
4547 	struct ice_hw *hw;
4548 	int status = 0;
4549 	u16 qg_size;
4550 	int i;
4551 
4552 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4553 		return -EIO;
4554 
4555 	hw = pi->hw;
4556 
4557 	qg_size = struct_size(qg_list, q_id, 1);
4558 	qg_list = kzalloc(qg_size, GFP_KERNEL);
4559 	if (!qg_list)
4560 		return -ENOMEM;
4561 
4562 	mutex_lock(&pi->sched_lock);
4563 
4564 	for (i = 0; i < count; i++) {
4565 		struct ice_sched_node *node;
4566 
4567 		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4568 		if (!node)
4569 			continue;
4570 
4571 		qg_list->parent_teid = node->info.parent_teid;
4572 		qg_list->num_qs = 1;
4573 		qg_list->q_id[0] =
4574 			cpu_to_le16(q_id[i] |
4575 				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4576 
4577 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4578 					    ICE_NO_RESET, 0, NULL);
4579 		if (status)
4580 			break;
4581 
4582 		ice_free_sched_node(pi, node);
4583 	}
4584 
4585 	mutex_unlock(&pi->sched_lock);
4586 	kfree(qg_list);
4587 	return status;
4588 }
4589 
4590 /**
4591  * ice_replay_pre_init - replay pre initialization
4592  * @hw: pointer to the HW struct
4593  *
4594  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4595  */
4596 static int ice_replay_pre_init(struct ice_hw *hw)
4597 {
4598 	struct ice_switch_info *sw = hw->switch_info;
4599 	u8 i;
4600 
4601 	/* Delete old entries from replay filter list head if there is any */
4602 	ice_rm_all_sw_replay_rule_info(hw);
4603 	/* At the start of replay, move entries into the replay_rules list;
4604 	 * this allows adding rule entries back to the filt_rules list,
4605 	 * which is the operational list.
4606 	 */
4607 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4608 		list_replace_init(&sw->recp_list[i].filt_rules,
4609 				  &sw->recp_list[i].filt_replay_rules);
4610 	ice_sched_replay_agg_vsi_preinit(hw);
4611 
4612 	return 0;
4613 }
4614 
4615 /**
4616  * ice_replay_vsi - replay VSI configuration
4617  * @hw: pointer to the HW struct
4618  * @vsi_handle: driver VSI handle
4619  *
4620  * Restore all VSI configuration after reset. It is required to call this
4621  * function with main VSI first.
4622  */
4623 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4624 {
4625 	int status;
4626 
4627 	if (!ice_is_vsi_valid(hw, vsi_handle))
4628 		return -EINVAL;
4629 
4630 	/* Replay pre-initialization if there is any */
4631 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4632 		status = ice_replay_pre_init(hw);
4633 		if (status)
4634 			return status;
4635 	}
4636 	/* Replay per VSI all RSS configurations */
4637 	status = ice_replay_rss_cfg(hw, vsi_handle);
4638 	if (status)
4639 		return status;
4640 	/* Replay per VSI all filters */
4641 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4642 	if (!status)
4643 		status = ice_replay_vsi_agg(hw, vsi_handle);
4644 	return status;
4645 }
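
/* Illustrative ordering sketch (editorial addition): after a reset the main
 * VSI must be replayed first, then every other active VSI, and finally the
 * post-replay cleanup below is run. Iteration over the remaining handles is
 * assumed to come from the caller's VSI bookkeeping.
 *
 *	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	if (err)
 *		return err;
 *	... for each remaining active VSI handle ...
 *		err = ice_replay_vsi(hw, vsi_handle);
 *	ice_replay_post(hw);
 */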
4646 
4647 /**
4648  * ice_replay_post - post replay configuration cleanup
4649  * @hw: pointer to the HW struct
4650  *
4651  * Post replay cleanup.
4652  */
4653 void ice_replay_post(struct ice_hw *hw)
4654 {
4655 	/* Delete old entries from replay filter list head */
4656 	ice_rm_all_sw_replay_rule_info(hw);
4657 	ice_sched_replay_agg(hw);
4658 }
4659 
4660 /**
4661  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4662  * @hw: ptr to the hardware info
4663  * @reg: offset of 64 bit HW register to read from
4664  * @prev_stat_loaded: bool to specify if previous stats are loaded
4665  * @prev_stat: ptr to previous loaded stat value
4666  * @cur_stat: ptr to current stat value
4667  */
4668 void
4669 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4670 		  u64 *prev_stat, u64 *cur_stat)
4671 {
4672 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4673 
4674 	/* device stats are not reset at PFR, so they likely will not be zero
4675 	 * when the driver starts. Thus, save the value from the first read
4676 	 * without adding it to the statistic value so that we report stats which
4677 	 * count up from zero.
4678 	 */
4679 	if (!prev_stat_loaded) {
4680 		*prev_stat = new_data;
4681 		return;
4682 	}
4683 
4684 	/* Calculate the difference between the new and old values, and then
4685 	 * add it to the software stat value.
4686 	 */
4687 	if (new_data >= *prev_stat)
4688 		*cur_stat += new_data - *prev_stat;
4689 	else
4690 		/* to manage the potential roll-over */
4691 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4692 
4693 	/* Update the previously stored value to prepare for next read */
4694 	*prev_stat = new_data;
4695 }
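
/* Worked roll-over example (editorial addition): with *prev_stat equal to
 * 0xFFFFFFFFF0 (2^40 - 16) and a new 40-bit read of 0x10, new_data is less
 * than *prev_stat, so the increment is (0x10 + 2^40) - 0xFFFFFFFFF0 = 0x20,
 * i.e. 32 counts accumulated across the counter wrap.
 */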
4696 
4697 /**
4698  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4699  * @hw: ptr to the hardware info
4700  * @reg: offset of HW register to read from
4701  * @prev_stat_loaded: bool to specify if previous stats are loaded
4702  * @prev_stat: ptr to previous loaded stat value
4703  * @cur_stat: ptr to current stat value
4704  */
4705 void
4706 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4707 		  u64 *prev_stat, u64 *cur_stat)
4708 {
4709 	u32 new_data;
4710 
4711 	new_data = rd32(hw, reg);
4712 
4713 	/* device stats are not reset at PFR, so they likely will not be zero
4714 	 * when the driver starts. Thus, save the value from the first read
4715 	 * without adding it to the statistic value so that we report stats which
4716 	 * count up from zero.
4717 	 */
4718 	if (!prev_stat_loaded) {
4719 		*prev_stat = new_data;
4720 		return;
4721 	}
4722 
4723 	/* Calculate the difference between the new and old values, and then
4724 	 * add it to the software stat value.
4725 	 */
4726 	if (new_data >= *prev_stat)
4727 		*cur_stat += new_data - *prev_stat;
4728 	else
4729 		/* to manage the potential roll-over */
4730 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4731 
4732 	/* Update the previously stored value to prepare for next read */
4733 	*prev_stat = new_data;
4734 }
4735 
4736 /**
4737  * ice_sched_query_elem - query element information from HW
4738  * @hw: pointer to the HW struct
4739  * @node_teid: node TEID to be queried
4740  * @buf: buffer to element information
4741  *
4742  * This function queries HW element information
4743  */
4744 int
4745 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4746 		     struct ice_aqc_txsched_elem_data *buf)
4747 {
4748 	u16 buf_size, num_elem_ret = 0;
4749 	int status;
4750 
4751 	buf_size = sizeof(*buf);
4752 	memset(buf, 0, buf_size);
4753 	buf->node_teid = cpu_to_le32(node_teid);
4754 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4755 					  NULL);
4756 	if (status || num_elem_ret != 1)
4757 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4758 	return status;
4759 }
4760 
4761 /**
4762  * ice_aq_set_driver_param - Set driver parameter to share via firmware
4763  * @hw: pointer to the HW struct
4764  * @idx: parameter index to set
4765  * @value: the value to set the parameter to
4766  * @cd: pointer to command details structure or NULL
4767  *
4768  * Set the value of one of the software defined parameters. All PFs connected
4769  * to this device can read the value using ice_aq_get_driver_param.
4770  *
4771  * Note that firmware provides no synchronization or locking, and will not
4772  * save the parameter value during a device reset. It is expected that
4773  * a single PF will write the parameter value, while all other PFs will only
4774  * read it.
4775  */
4776 int
4777 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4778 			u32 value, struct ice_sq_cd *cd)
4779 {
4780 	struct ice_aqc_driver_shared_params *cmd;
4781 	struct ice_aq_desc desc;
4782 
4783 	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4784 		return -EIO;
4785 
4786 	cmd = &desc.params.drv_shared_params;
4787 
4788 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4789 
4790 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
4791 	cmd->param_indx = idx;
4792 	cmd->param_val = cpu_to_le32(value);
4793 
4794 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4795 }
4796 
4797 /**
4798  * ice_aq_get_driver_param - Get driver parameter shared via firmware
4799  * @hw: pointer to the HW struct
4800  * @idx: parameter index to get
4801  * @value: storage to return the shared parameter
4802  * @cd: pointer to command details structure or NULL
4803  *
4804  * Get the value of one of the software defined parameters.
4805  *
4806  * Note that firmware provides no synchronization or locking. It is expected
4807  * that only a single PF will write a given parameter.
4808  */
4809 int
4810 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4811 			u32 *value, struct ice_sq_cd *cd)
4812 {
4813 	struct ice_aqc_driver_shared_params *cmd;
4814 	struct ice_aq_desc desc;
4815 	int status;
4816 
4817 	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4818 		return -EIO;
4819 
4820 	cmd = &desc.params.drv_shared_params;
4821 
4822 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4823 
4824 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
4825 	cmd->param_indx = idx;
4826 
4827 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4828 	if (status)
4829 		return status;
4830 
4831 	*value = le32_to_cpu(cmd->param_val);
4832 
4833 	return 0;
4834 }
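
/* Illustrative usage sketch (editorial addition): one PF publishes a value
 * and any other PF on the same device reads it back. The parameter index is
 * assumed to be one of the enum ice_aqc_driver_params values, e.g. the PTP
 * clock index shared between PFs.
 *
 *	(publishing PF)
 *	err = ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
 *				      clock_index, NULL);
 *
 *	(any other PF)
 *	err = ice_aq_get_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
 *				      &clock_index, NULL);
 */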
4835 
4836 /**
4837  * ice_aq_set_gpio
4838  * @hw: pointer to the hw struct
4839  * @gpio_ctrl_handle: GPIO controller node handle
4840  * @pin_idx: IO Number of the GPIO that needs to be set
4841  * @value: SW-provided IO value to set (in the LSB)
4842  * @cd: pointer to command details structure or NULL
4843  *
4844  * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
4845  */
4846 int
4847 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
4848 		struct ice_sq_cd *cd)
4849 {
4850 	struct ice_aqc_gpio *cmd;
4851 	struct ice_aq_desc desc;
4852 
4853 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
4854 	cmd = &desc.params.read_write_gpio;
4855 	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4856 	cmd->gpio_num = pin_idx;
4857 	cmd->gpio_val = value ? 1 : 0;
4858 
4859 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4860 }
4861 
4862 /**
4863  * ice_aq_get_gpio
4864  * @hw: pointer to the hw struct
4865  * @gpio_ctrl_handle: GPIO controller node handle
4866  * @pin_idx: IO number of the GPIO to be read
4867  * @value: IO value read
4868  * @cd: pointer to command details structure or NULL
4869  *
4870  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
4871  * the topology
4872  */
4873 int
4874 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
4875 		bool *value, struct ice_sq_cd *cd)
4876 {
4877 	struct ice_aqc_gpio *cmd;
4878 	struct ice_aq_desc desc;
4879 	int status;
4880 
4881 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
4882 	cmd = &desc.params.read_write_gpio;
4883 	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4884 	cmd->gpio_num = pin_idx;
4885 
4886 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4887 	if (status)
4888 		return status;
4889 
4890 	*value = !!cmd->gpio_val;
4891 	return 0;
4892 }
4893 
4894 /**
4895  * ice_fw_supports_link_override
4896  * @hw: pointer to the hardware structure
4897  *
4898  * Checks if the firmware supports link override
4899  */
4900 bool ice_fw_supports_link_override(struct ice_hw *hw)
4901 {
4902 	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4903 		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4904 			return true;
4905 		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4906 		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4907 			return true;
4908 	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4909 		return true;
4910 	}
4911 
4912 	return false;
4913 }
4914 
4915 /**
4916  * ice_get_link_default_override
4917  * @ldo: pointer to the link default override struct
4918  * @pi: pointer to the port info struct
4919  *
4920  * Gets the link default override for a port
4921  */
4922 int
4923 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4924 			      struct ice_port_info *pi)
4925 {
4926 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
4927 	struct ice_hw *hw = pi->hw;
4928 	int status;
4929 
4930 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4931 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4932 	if (status) {
4933 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4934 		return status;
4935 	}
4936 
4937 	/* Each port has its own config; calculate for our port */
4938 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4939 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4940 
4941 	/* link options first */
4942 	status = ice_read_sr_word(hw, tlv_start, &buf);
4943 	if (status) {
4944 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4945 		return status;
4946 	}
4947 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4948 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4949 		ICE_LINK_OVERRIDE_PHY_CFG_S;
4950 
4951 	/* link PHY config */
4952 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4953 	status = ice_read_sr_word(hw, offset, &buf);
4954 	if (status) {
4955 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4956 		return status;
4957 	}
4958 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4959 
4960 	/* PHY types low */
4961 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4962 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4963 		status = ice_read_sr_word(hw, (offset + i), &buf);
4964 		if (status) {
4965 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low.\n");
4966 			return status;
4967 		}
4968 		/* shift 16 bits at a time to fill 64 bits */
4969 		ldo->phy_type_low |= ((u64)buf << (i * 16));
4970 	}
4971 
4972 	/* PHY types high */
4973 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4974 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4975 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4976 		status = ice_read_sr_word(hw, (offset + i), &buf);
4977 		if (status) {
4978 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high.\n");
4979 			return status;
4980 		}
4981 		/* shift 16 bits at a time to fill 64 bits */
4982 		ldo->phy_type_high |= ((u64)buf << (i * 16));
4983 	}
4984 
4985 	return status;
4986 }
4987 
4988 /**
4989  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4990  * @caps: get PHY capability data
4991  */
4992 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4993 {
4994 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4995 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4996 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
4997 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
4998 		return true;
4999 
5000 	return false;
5001 }
5002 
5003 /**
5004  * ice_aq_set_lldp_mib - Set the LLDP MIB
5005  * @hw: pointer to the HW struct
5006  * @mib_type: Local, Remote or both Local and Remote MIBs
5007  * @buf: pointer to the caller-supplied buffer to store the MIB block
5008  * @buf_size: size of the buffer (in bytes)
5009  * @cd: pointer to command details structure or NULL
5010  *
5011  * Set the LLDP MIB. (0x0A08)
5012  */
5013 int
5014 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5015 		    struct ice_sq_cd *cd)
5016 {
5017 	struct ice_aqc_lldp_set_local_mib *cmd;
5018 	struct ice_aq_desc desc;
5019 
5020 	cmd = &desc.params.lldp_set_mib;
5021 
5022 	if (buf_size == 0 || !buf)
5023 		return -EINVAL;
5024 
5025 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5026 
5027 	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
5028 	desc.datalen = cpu_to_le16(buf_size);
5029 
5030 	cmd->type = mib_type;
5031 	cmd->length = cpu_to_le16(buf_size);
5032 
5033 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5034 }
5035 
5036 /**
5037  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
5038  * @hw: pointer to HW struct
5039  */
5040 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5041 {
5042 	if (hw->mac_type != ICE_MAC_E810)
5043 		return false;
5044 
5045 	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5046 		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5047 			return true;
5048 		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5049 		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5050 			return true;
5051 	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5052 		return true;
5053 	}
5054 	return false;
5055 }
5056 
5057 /**
5058  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5059  * @hw: pointer to HW struct
5060  * @vsi_num: absolute HW index for VSI
5061  * @add: boolean for if adding or removing a filter
5062  */
5063 int
5064 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5065 {
5066 	struct ice_aqc_lldp_filter_ctrl *cmd;
5067 	struct ice_aq_desc desc;
5068 
5069 	cmd = &desc.params.lldp_filter_ctrl;
5070 
5071 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5072 
5073 	if (add)
5074 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5075 	else
5076 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5077 
5078 	cmd->vsi_num = cpu_to_le16(vsi_num);
5079 
5080 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5081 }
5082 
5083 /**
5084  * ice_fw_supports_report_dflt_cfg
5085  * @hw: pointer to the hardware structure
5086  *
5087  * Checks if the firmware supports report default configuration
5088  */
5089 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
5090 {
5091 	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5092 		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
5093 			return true;
5094 		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
5095 		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
5096 			return true;
5097 	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5098 		return true;
5099 	}
5100 	return false;
5101 }
5102