// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
#include <linux/packing.h>

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}

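/*
 * Illustrative example (not part of the driver flow): given the tables
 * above, a phy_type_low of BIT_ULL(12) | BIT_ULL(19) would be dumped by
 * ice_dump_phy_type() as
 *
 *	<prefix>: bit(12): 10GBASE_T
 *	<prefix>: bit(19): 25GBASE_T
 */
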
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830CC_BACKPLANE:
	case ICE_DEV_ID_E830CC_QSFP56:
	case ICE_DEV_ID_E830CC_SFP:
	case ICE_DEV_ID_E830CC_SFP_DD:
	case ICE_DEV_ID_E830C_BACKPLANE:
	case ICE_DEV_ID_E830_XXV_BACKPLANE:
	case ICE_DEV_ID_E830C_QSFP:
	case ICE_DEV_ID_E830_XXV_QSFP:
	case ICE_DEV_ID_E830C_SFP:
	case ICE_DEV_ID_E830_XXV_SFP:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_pf_c827 - check if pf contains c827 phy
 * @hw: pointer to the hw struct
 *
 * Return: true if the device has c827 phy.
 */
static bool ice_is_pf_c827(struct ice_hw *hw)
{
	struct ice_aqc_get_link_topo cmd = {};
	u8 node_part_number;
	u16 node_handle;
	int status;

	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
		return true;

	cmd.addr.topo_params.node_type_ctx =
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
	cmd.addr.topo_params.index = 0;

	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
					 &node_handle);

	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
		return false;

	if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
		return true;

	return false;
}

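/*
 * Note on the flow above: for E810 devices other than E810C_QSFP the PHY
 * is assumed to be a C827 without consulting the netlist; only the QSFP
 * variant needs the topology lookup to confirm one of the two known C827
 * node handles.
 */
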
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer, which should be interpreted as
 * a "manage_mac_read" response. The relevant addresses are also stored in
 * the HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

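/*
 * Sketch of the caller-side buffer sizing (hypothetical locals; compare
 * the real call in ice_init_hw() below). Two response entries are needed
 * because a port can report both a LAN and a WoL address:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
 *			    GFP_KERNEL);
 *
 *	if (buf)
 *		err = ice_aq_manage_mac_read(hw, buf, len, NULL);
 */
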
prefix = "phy_caps_no_media"; 355 break; 356 case ICE_AQC_REPORT_ACTIVE_CFG: 357 prefix = "phy_caps_active"; 358 break; 359 case ICE_AQC_REPORT_DFLT_CFG: 360 prefix = "phy_caps_default"; 361 break; 362 default: 363 prefix = "phy_caps_invalid"; 364 } 365 366 ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low), 367 le64_to_cpu(pcaps->phy_type_high), prefix); 368 369 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n", 370 prefix, report_mode); 371 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps); 372 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix, 373 pcaps->low_power_ctrl_an); 374 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix, 375 pcaps->eee_cap); 376 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix, 377 pcaps->eeer_value); 378 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix, 379 pcaps->link_fec_options); 380 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n", 381 prefix, pcaps->module_compliance_enforcement); 382 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n", 383 prefix, pcaps->extended_compliance_code); 384 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix, 385 pcaps->module_type[0]); 386 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix, 387 pcaps->module_type[1]); 388 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix, 389 pcaps->module_type[2]); 390 391 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { 392 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 393 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 394 memcpy(pi->phy.link_info.module_type, &pcaps->module_type, 395 sizeof(pi->phy.link_info.module_type)); 396 } 397 398 return status; 399 } 400 401 /** 402 * ice_aq_get_link_topo_handle - get link topology node return status 403 * @pi: port information structure 404 * @node_type: requested node type 405 * @cd: pointer to command details structure or NULL 406 * 407 * Get link topology node return status for specified node type (0x06E0) 408 * 409 * Node type cage can be used to determine if cage is present. If AQC 410 * returns error (ENOENT), then no cage present. If no cage present, then 411 * connection type is backplane or BASE-T. 412 */ 413 static int 414 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, 415 struct ice_sq_cd *cd) 416 { 417 struct ice_aqc_get_link_topo *cmd; 418 struct ice_aq_desc desc; 419 420 cmd = &desc.params.get_link_topo; 421 422 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 423 424 cmd->addr.topo_params.node_type_ctx = 425 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << 426 ICE_AQC_LINK_TOPO_NODE_CTX_S); 427 428 /* set node type */ 429 cmd->addr.topo_params.node_type_ctx |= 430 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); 431 432 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 433 } 434 435 /** 436 * ice_aq_get_netlist_node 437 * @hw: pointer to the hw struct 438 * @cmd: get_link_topo AQ structure 439 * @node_part_number: output node part number if node found 440 * @node_handle: output node handle parameter if node found 441 * 442 * Get netlist node handle. 
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type: type of netlist node to look for
 * @ctx: context of the search
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Return:
 * * 0 if the node is found,
 * * -ENOENT if no handle was found,
 * * negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

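/*
 * Illustrative sketch: looking up a C827 PHY in the port context with
 * ice_find_netlist_node() (the same constants ice_is_pf_c827() uses
 * above) would read
 *
 *	u16 handle;
 *	int err = ice_find_netlist_node(hw,
 *					ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
 *					ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
 *					ICE_AQC_GET_LINK_TOPO_NODE_NR_C827,
 *					&handle);
 *
 * where -ENOENT means none of the first ICE_MAX_NETLIST_SIZE indices
 * held a matching node.
 */
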
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

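/*
 * Example classifications from the switch above: a link reporting only
 * ICE_PHY_TYPE_LOW_10GBASE_T resolves to ICE_MEDIA_BASET, while
 * ICE_PHY_TYPE_LOW_10G_SFI_DA resolves to ICE_MEDIA_DA, and the AUI/CAUI
 * types are classified as DA or backplane depending on whether a media
 * cage is present.
 */
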
/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

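/*
 * Hedged sketch of the refresh pattern used by callers: since the
 * function above clears pi->phy.get_link_info, a caller wanting fresh
 * link data typically does
 *
 *	if (pi->phy.get_link_info) {
 *		err = ice_aq_get_link_info(pi, false, NULL, NULL);
 *		if (err)
 *			return err;
 *	}
 */
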
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

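/*
 * Example (mirrors the call in ice_init_hw() below): enabling jumbo
 * frames at the MAC level is a single direct command,
 *
 *	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */
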
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	/* Initialize recipe count with default recipes read from NVM */
	sw->recp_cnt = ICE_SW_LKUP_LAST;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 *
 * Return: 0 on success, -ETIMEDOUT on timeout.
 */
static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{
	int fw_loading;
	u32 elapsed = 0;

	while (elapsed <= timeout) {
		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;

		/* firmware was not yet loaded, we have to wait more */
		if (fw_loading) {
			elapsed += 100;
			msleep(100);
			continue;
		}
		return 0;
	}

	return -ETIMEDOUT;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	/* Some cards require longer initialization times
	 * due to necessity of loading FW from an external source.
	 * This can take even half a minute.
	 */
	if (ice_is_pf_c827(hw)) {
		status = ice_wait_for_fw(hw, 30000);
		if (status) {
			dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
			goto err_unroll_fltr_mgmt_struct;
		}
	}

	return 0;
err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

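/*
 * ice_init_hw() above and ice_deinit_hw() below are paired: a successful
 * call to the former must eventually be undone by the latter (hedged,
 * hypothetical probe/remove flow):
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;
 *	...
 *	ice_deinit_hw(hw);
 */
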
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

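/*
 * Example (mirrors ice_init_hw() above): a PF-only reset is requested
 * with
 *
 *	err = ice_reset(hw, ICE_RESET_PFR);
 *
 * Any other reset type also restores PXE mode, which must be cleared via
 * ice_clear_pxe_mode() once the AQ interface is back up.
 */
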
/**
 * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
				   const ice_rxq_ctx_buf_t *rxq_ctx,
				   u32 rxq_index)
{
	/* Copy each dword separately to HW */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		u32 ctx = ((const u32 *)rxq_ctx)[i];

		wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
	}
}

#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
	PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)

/* LAN Rx Queue Context */
static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
				 /* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
};

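/*
 * Reading the table above: an entry such as
 *
 *	ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89)
 *
 * declares that the CPU-friendly ice_rlan_ctx::qlen value occupies a
 * 13-bit field whose least significant bit is bit 89 of the packed HW
 * buffer; pack_fields() below performs the bit-level placement.
 */
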
/**
 * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
 * @ctx: the Rx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
			     ice_rxq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

/**
 * ice_write_rxq_ctx - Write Rx Queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the unpacked Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Pack the sparse Rx Queue context into dense hardware format and write it
 * into the HW register space.
 *
 * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	ice_rxq_ctx_buf_t buf = {};

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	ice_pack_rxq_ctx(rlan_ctx, &buf);
	ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);

	return 0;
}

/* LAN Tx Queue Context */
static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
				 /* Field		Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,		3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,		5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,		3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,		10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,		2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,		10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,		1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,		1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,		8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,		1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,		1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,		1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,		1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,		9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,	14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,	6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,		13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,	4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,		1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,		11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,		1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,		1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,	2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3,	168),
};

/**
 * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
 * @ctx: the Tx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

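/*
 * Hedged usage sketch for ice_write_rxq_ctx() above (hypothetical ring
 * values; the 7-bit shift assumes the queue base is kept in 128-byte
 * units):
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */
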
/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 * @flags: control queue descriptor flags
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(flags);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}

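/*
 * Illustrative sideband read (hypothetical destination and address): a
 * zero opcode denotes a read, in which case the completion carries the
 * value back in in.data, as handled at the end of ice_sbq_rw_reg()
 * above:
 *
 *	struct ice_sbq_msg_input in = {
 *		.dest_dev = dest,
 *		.opcode = 0,
 *		.msg_addr_low = addr_lo,
 *		.msg_addr_high = addr_hi,
 *	};
 *
 *	err = ice_sbq_rw_reg(hw, &in, flags);
 */
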
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}

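/*
 * Behavior note on the retry loop above: a command is resent at most
 * ICE_SQ_SEND_MAX_EXECUTE times, sleeping ICE_SQ_SEND_DELAY_TIME_MS
 * between attempts, and only for the direct (buffer-less) opcodes
 * whitelisted in ice_should_retry_sq_send_cmd().
 */
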
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Set/Get Tx Topology, Add Recipe, Set Recipes to Profile
	 * Association, Get Recipe, Get Recipes to Profile Association, and
	 * Release Resource (with resource ID set to Global Config Lock)
	 * AdminQ commands are allowed; all others must block until the
	 * package download completes and the Global Config Lock is released.
	 * See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_set_tx_topo:
	case ice_aqc_opc_get_tx_topo:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

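/*
 * Example of the gatekeeping above: ice_aqc_opc_get_ver passes straight
 * through the switch without taking ice_global_cfg_lock_sw, while an
 * unlisted opcode serializes against any in-progress package download.
 */
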
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

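/*
 * Example (driver unload path): ice_aq_q_shutdown(hw, true) tells the FW
 * that the AdminQ is being shut down because the driver itself is
 * unloading.
 */
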
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009).
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire ownership of a resource.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary
	 * updates; in this case the caller does not obtain the resource
	 * and has no further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ?
timeout - delay : 0; 1896 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1897 1898 if (status == -EALREADY) 1899 /* lock free, but no work to do */ 1900 break; 1901 1902 if (!status) 1903 /* lock acquired */ 1904 break; 1905 } 1906 if (status && status != -EALREADY) 1907 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1908 1909 ice_acquire_res_exit: 1910 if (status == -EALREADY) { 1911 if (access == ICE_RES_WRITE) 1912 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1913 else 1914 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1915 } 1916 return status; 1917 } 1918 1919 /** 1920 * ice_release_res 1921 * @hw: pointer to the HW structure 1922 * @res: resource ID 1923 * 1924 * This function will release a resource using the proper Admin Command. 1925 */ 1926 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1927 { 1928 unsigned long timeout; 1929 int status; 1930 1931 /* there are some rare cases when trying to release the resource 1932 * results in an admin queue timeout, so handle them correctly 1933 */ 1934 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1935 do { 1936 status = ice_aq_release_res(hw, res, 0, NULL); 1937 if (status != -EIO) 1938 break; 1939 usleep_range(1000, 2000); 1940 } while (time_before(jiffies, timeout)); 1941 } 1942 1943 /** 1944 * ice_aq_alloc_free_res - command to allocate/free resources 1945 * @hw: pointer to the HW struct 1946 * @buf: Indirect buffer to hold data parameters and response 1947 * @buf_size: size of buffer for indirect commands 1948 * @opc: pass in the command opcode 1949 * 1950 * Helper function to allocate/free resources using the admin queue commands 1951 */ 1952 int ice_aq_alloc_free_res(struct ice_hw *hw, 1953 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1954 enum ice_adminq_opc opc) 1955 { 1956 struct ice_aqc_alloc_free_res_cmd *cmd; 1957 struct ice_aq_desc desc; 1958 1959 cmd = &desc.params.sw_res_ctrl; 1960 1961 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1962 return -EINVAL; 1963 1964 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1965 1966 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1967 1968 cmd->num_entries = cpu_to_le16(1); 1969 1970 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1971 } 1972 1973 /** 1974 * ice_alloc_hw_res - allocate resource 1975 * @hw: pointer to the HW struct 1976 * @type: type of resource 1977 * @num: number of resources to allocate 1978 * @btm: allocate from bottom 1979 * @res: pointer to array that will receive the resources 1980 */ 1981 int 1982 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 1983 { 1984 struct ice_aqc_alloc_free_res_elem *buf; 1985 u16 buf_len; 1986 int status; 1987 1988 buf_len = struct_size(buf, elem, num); 1989 buf = kzalloc(buf_len, GFP_KERNEL); 1990 if (!buf) 1991 return -ENOMEM; 1992 1993 /* Prepare buffer to allocate resource. 
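 * The resource type is OR'd with the DEDICATED and IGNORE_INDEX flags so
 * that firmware hands back an exclusive (non-shared) resource and picks
 * the index itself; SCAN_BOTTOM optionally steers the search direction.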
*/ 1994 buf->num_elems = cpu_to_le16(num); 1995 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1996 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1997 if (btm) 1998 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1999 2000 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2001 if (status) 2002 goto ice_alloc_res_exit; 2003 2004 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2005 2006 ice_alloc_res_exit: 2007 kfree(buf); 2008 return status; 2009 } 2010 2011 /** 2012 * ice_free_hw_res - free allocated HW resource 2013 * @hw: pointer to the HW struct 2014 * @type: type of resource to free 2015 * @num: number of resources 2016 * @res: pointer to array that contains the resources to free 2017 */ 2018 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2019 { 2020 struct ice_aqc_alloc_free_res_elem *buf; 2021 u16 buf_len; 2022 int status; 2023 2024 buf_len = struct_size(buf, elem, num); 2025 buf = kzalloc(buf_len, GFP_KERNEL); 2026 if (!buf) 2027 return -ENOMEM; 2028 2029 /* Prepare buffer to free resource. */ 2030 buf->num_elems = cpu_to_le16(num); 2031 buf->res_type = cpu_to_le16(type); 2032 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2033 2034 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2035 if (status) 2036 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2037 2038 kfree(buf); 2039 return status; 2040 } 2041 2042 /** 2043 * ice_get_num_per_func - determine number of resources per PF 2044 * @hw: pointer to the HW structure 2045 * @max: value to be evenly split between each PF 2046 * 2047 * Determine the number of valid functions by going through the bitmap returned 2048 * from parsing capabilities and use this to calculate the number of resources 2049 * per PF based on the max value passed in. 2050 */ 2051 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2052 { 2053 u8 funcs; 2054 2055 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2056 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2057 ICE_CAPS_VALID_FUNCS_M); 2058 2059 if (!funcs) 2060 return 0; 2061 2062 return max / funcs; 2063 } 2064 2065 /** 2066 * ice_parse_common_caps - parse common device/function capabilities 2067 * @hw: pointer to the HW struct 2068 * @caps: pointer to common capabilities structure 2069 * @elem: the capability element to parse 2070 * @prefix: message prefix for tracing capabilities 2071 * 2072 * Given a capability element, extract relevant details into the common 2073 * capability structure. 2074 * 2075 * Returns: true if the capability matches one of the common capability ids, 2076 * false otherwise. 
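 *
 * For example, an element with cap == ICE_AQC_CAPS_RSS carries the RSS
 * table size in its number field and the table entry width in its
 * logical_id field.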
2077 */ 2078 static bool 2079 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2080 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2081 { 2082 u32 logical_id = le32_to_cpu(elem->logical_id); 2083 u32 phys_id = le32_to_cpu(elem->phys_id); 2084 u32 number = le32_to_cpu(elem->number); 2085 u16 cap = le16_to_cpu(elem->cap); 2086 bool found = true; 2087 2088 switch (cap) { 2089 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2090 caps->valid_functions = number; 2091 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2092 caps->valid_functions); 2093 break; 2094 case ICE_AQC_CAPS_SRIOV: 2095 caps->sr_iov_1_1 = (number == 1); 2096 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2097 caps->sr_iov_1_1); 2098 break; 2099 case ICE_AQC_CAPS_DCB: 2100 caps->dcb = (number == 1); 2101 caps->active_tc_bitmap = logical_id; 2102 caps->maxtc = phys_id; 2103 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2104 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2105 caps->active_tc_bitmap); 2106 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2107 break; 2108 case ICE_AQC_CAPS_RSS: 2109 caps->rss_table_size = number; 2110 caps->rss_table_entry_width = logical_id; 2111 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2112 caps->rss_table_size); 2113 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2114 caps->rss_table_entry_width); 2115 break; 2116 case ICE_AQC_CAPS_RXQS: 2117 caps->num_rxq = number; 2118 caps->rxq_first_id = phys_id; 2119 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2120 caps->num_rxq); 2121 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2122 caps->rxq_first_id); 2123 break; 2124 case ICE_AQC_CAPS_TXQS: 2125 caps->num_txq = number; 2126 caps->txq_first_id = phys_id; 2127 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2128 caps->num_txq); 2129 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2130 caps->txq_first_id); 2131 break; 2132 case ICE_AQC_CAPS_MSIX: 2133 caps->num_msix_vectors = number; 2134 caps->msix_vector_first_id = phys_id; 2135 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2136 caps->num_msix_vectors); 2137 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2138 caps->msix_vector_first_id); 2139 break; 2140 case ICE_AQC_CAPS_PENDING_NVM_VER: 2141 caps->nvm_update_pending_nvm = true; 2142 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2143 break; 2144 case ICE_AQC_CAPS_PENDING_OROM_VER: 2145 caps->nvm_update_pending_orom = true; 2146 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2147 break; 2148 case ICE_AQC_CAPS_PENDING_NET_VER: 2149 caps->nvm_update_pending_netlist = true; 2150 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2151 break; 2152 case ICE_AQC_CAPS_NVM_MGMT: 2153 caps->nvm_unified_update = 2154 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2155 true : false; 2156 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2157 caps->nvm_unified_update); 2158 break; 2159 case ICE_AQC_CAPS_RDMA: 2160 caps->rdma = (number == 1); 2161 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2162 break; 2163 case ICE_AQC_CAPS_MAX_MTU: 2164 caps->max_mtu = number; 2165 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2166 prefix, caps->max_mtu); 2167 break; 2168 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2169 caps->pcie_reset_avoidance = (number > 0); 2170 ice_debug(hw, ICE_DBG_INIT, 2171 "%s: pcie_reset_avoidance = %d\n", prefix, 2172 caps->pcie_reset_avoidance); 2173 break; 2174 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2175 caps->reset_restrict_support = (number == 1); 2176 ice_debug(hw, ICE_DBG_INIT, 2177 "%s: reset_restrict_support = %d\n", prefix, 2178 caps->reset_restrict_support); 2179 break; 2180 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2181 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2182 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2183 prefix, caps->roce_lag); 2184 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2185 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2186 prefix, caps->sriov_lag); 2187 break; 2188 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2189 caps->tx_sched_topo_comp_mode_en = (number == 1); 2190 break; 2191 default: 2192 /* Not one of the recognized common capabilities */ 2193 found = false; 2194 } 2195 2196 return found; 2197 } 2198 2199 /** 2200 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2201 * @hw: pointer to the HW structure 2202 * @caps: pointer to capabilities structure to fix 2203 * 2204 * Re-calculate the capabilities that are dependent on the number of physical 2205 * ports; i.e. some features are not supported or function differently on 2206 * devices with more than 4 ports. 2207 */ 2208 static void 2209 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2210 { 2211 /* This assumes device capabilities are always scanned before function 2212 * capabilities during the initialization flow. 2213 */ 2214 if (hw->dev_caps.num_funcs > 4) { 2215 /* Max 4 TCs per port */ 2216 caps->maxtc = 4; 2217 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2218 caps->maxtc); 2219 if (caps->rdma) { 2220 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2221 caps->rdma = 0; 2222 } 2223 2224 /* print message only when processing device capabilities 2225 * during initialization. 2226 */ 2227 if (caps == &hw->dev_caps.common_cap) 2228 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2229 } 2230 } 2231 2232 /** 2233 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2234 * @hw: pointer to the HW struct 2235 * @func_p: pointer to function capabilities structure 2236 * @cap: pointer to the capability element to parse 2237 * 2238 * Extract function capabilities for ICE_AQC_CAPS_VF. 
2239 */ 2240 static void 2241 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2242 struct ice_aqc_list_caps_elem *cap) 2243 { 2244 u32 logical_id = le32_to_cpu(cap->logical_id); 2245 u32 number = le32_to_cpu(cap->number); 2246 2247 func_p->num_allocd_vfs = number; 2248 func_p->vf_base_id = logical_id; 2249 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2250 func_p->num_allocd_vfs); 2251 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2252 func_p->vf_base_id); 2253 } 2254 2255 /** 2256 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2257 * @hw: pointer to the HW struct 2258 * @func_p: pointer to function capabilities structure 2259 * @cap: pointer to the capability element to parse 2260 * 2261 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2262 */ 2263 static void 2264 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2265 struct ice_aqc_list_caps_elem *cap) 2266 { 2267 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2268 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2269 le32_to_cpu(cap->number)); 2270 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2271 func_p->guar_num_vsi); 2272 } 2273 2274 /** 2275 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2276 * @hw: pointer to the HW struct 2277 * @func_p: pointer to function capabilities structure 2278 * @cap: pointer to the capability element to parse 2279 * 2280 * Extract function capabilities for ICE_AQC_CAPS_1588. 2281 */ 2282 static void 2283 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2284 struct ice_aqc_list_caps_elem *cap) 2285 { 2286 struct ice_ts_func_info *info = &func_p->ts_func_info; 2287 u32 number = le32_to_cpu(cap->number); 2288 2289 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2290 func_p->common_cap.ieee_1588 = info->ena; 2291 2292 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2293 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2294 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2295 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2296 2297 if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) { 2298 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2299 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2300 } else { 2301 info->clk_freq = ICE_TIME_REF_FREQ_156_250; 2302 info->clk_src = ICE_CLK_SRC_TCXO; 2303 } 2304 2305 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2306 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2307 } else { 2308 /* Unknown clock frequency, so assume a (probably incorrect) 2309 * default to avoid out-of-bounds look ups of frequency 2310 * related information. 
		 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, gsize, bsize;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
		bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
		break;
	case ICE_MAC_E810:
	default:
		gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
		bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
	}
	func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
	func_p->fd_fltr_best_effort = bsize;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse the function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
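 *
 * A typical call, as issued from ice_discover_func_caps() below:
 *
 *	ice_parse_func_caps(hw, func_caps, cbuf, cap_count);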
2378 */ 2379 static void 2380 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2381 void *buf, u32 cap_count) 2382 { 2383 struct ice_aqc_list_caps_elem *cap_resp; 2384 u32 i; 2385 2386 cap_resp = buf; 2387 2388 memset(func_p, 0, sizeof(*func_p)); 2389 2390 for (i = 0; i < cap_count; i++) { 2391 u16 cap = le16_to_cpu(cap_resp[i].cap); 2392 bool found; 2393 2394 found = ice_parse_common_caps(hw, &func_p->common_cap, 2395 &cap_resp[i], "func caps"); 2396 2397 switch (cap) { 2398 case ICE_AQC_CAPS_VF: 2399 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2400 break; 2401 case ICE_AQC_CAPS_VSI: 2402 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2403 break; 2404 case ICE_AQC_CAPS_1588: 2405 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2406 break; 2407 case ICE_AQC_CAPS_FD: 2408 ice_parse_fdir_func_caps(hw, func_p); 2409 break; 2410 default: 2411 /* Don't list common capabilities as unknown */ 2412 if (!found) 2413 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2414 i, cap); 2415 break; 2416 } 2417 } 2418 2419 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2420 } 2421 2422 /** 2423 * ice_func_id_to_logical_id - map from function id to logical pf id 2424 * @active_function_bitmap: active function bitmap 2425 * @pf_id: function number of device 2426 * 2427 * Return: logical PF ID. 2428 */ 2429 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) 2430 { 2431 u8 logical_id = 0; 2432 u8 i; 2433 2434 for (i = 0; i < pf_id; i++) 2435 if (active_function_bitmap & BIT(i)) 2436 logical_id++; 2437 2438 return logical_id; 2439 } 2440 2441 /** 2442 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2443 * @hw: pointer to the HW struct 2444 * @dev_p: pointer to device capabilities structure 2445 * @cap: capability element to parse 2446 * 2447 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2448 */ 2449 static void 2450 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2451 struct ice_aqc_list_caps_elem *cap) 2452 { 2453 u32 number = le32_to_cpu(cap->number); 2454 2455 dev_p->num_funcs = hweight32(number); 2456 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2457 dev_p->num_funcs); 2458 2459 hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); 2460 } 2461 2462 /** 2463 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2464 * @hw: pointer to the HW struct 2465 * @dev_p: pointer to device capabilities structure 2466 * @cap: capability element to parse 2467 * 2468 * Parse ICE_AQC_CAPS_VF for device capabilities. 2469 */ 2470 static void 2471 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2472 struct ice_aqc_list_caps_elem *cap) 2473 { 2474 u32 number = le32_to_cpu(cap->number); 2475 2476 dev_p->num_vfs_exposed = number; 2477 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2478 dev_p->num_vfs_exposed); 2479 } 2480 2481 /** 2482 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2483 * @hw: pointer to the HW struct 2484 * @dev_p: pointer to device capabilities structure 2485 * @cap: capability element to parse 2486 * 2487 * Parse ICE_AQC_CAPS_VSI for device capabilities. 
2488 */ 2489 static void 2490 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2491 struct ice_aqc_list_caps_elem *cap) 2492 { 2493 u32 number = le32_to_cpu(cap->number); 2494 2495 dev_p->num_vsi_allocd_to_host = number; 2496 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2497 dev_p->num_vsi_allocd_to_host); 2498 } 2499 2500 /** 2501 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2502 * @hw: pointer to the HW struct 2503 * @dev_p: pointer to device capabilities structure 2504 * @cap: capability element to parse 2505 * 2506 * Parse ICE_AQC_CAPS_1588 for device capabilities. 2507 */ 2508 static void 2509 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2510 struct ice_aqc_list_caps_elem *cap) 2511 { 2512 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2513 u32 logical_id = le32_to_cpu(cap->logical_id); 2514 u32 phys_id = le32_to_cpu(cap->phys_id); 2515 u32 number = le32_to_cpu(cap->number); 2516 2517 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2518 dev_p->common_cap.ieee_1588 = info->ena; 2519 2520 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2521 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2522 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2523 2524 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2525 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2526 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2527 2528 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2529 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2530 info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0); 2531 2532 info->ena_ports = logical_id; 2533 info->tmr_own_map = phys_id; 2534 2535 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2536 dev_p->common_cap.ieee_1588); 2537 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2538 info->tmr0_owner); 2539 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2540 info->tmr0_owned); 2541 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2542 info->tmr0_ena); 2543 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2544 info->tmr1_owner); 2545 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2546 info->tmr1_owned); 2547 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2548 info->tmr1_ena); 2549 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2550 info->ts_ll_read); 2551 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2552 info->ts_ll_int_read); 2553 ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n", 2554 info->ll_phy_tmr_update); 2555 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2556 info->ena_ports); 2557 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2558 info->tmr_own_map); 2559 } 2560 2561 /** 2562 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2563 * @hw: pointer to the HW struct 2564 * @dev_p: pointer to device capabilities structure 2565 * @cap: capability element to parse 2566 * 2567 * Parse ICE_AQC_CAPS_FD for device capabilities. 
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_SENSOR_READING for the device capability for reading
 * enabled sensors.
 */
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			     struct ice_aqc_list_caps_elem *cap)
{
	dev_p->supported_sensors = le32_to_cpu(cap->number);

	ice_debug(hw, ICE_DBG_INIT,
		  "dev caps: supported sensors (bitmap) = 0x%x\n",
		  dev_p->supported_sensors);
}

/**
 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
 */
static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
					struct ice_hw_dev_caps *dev_p,
					struct ice_aqc_list_caps_elem *cap)
{
	dev_p->nac_topo.mode = le32_to_cpu(cap->number);
	dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;

	dev_info(ice_hw_to_dev(hw),
		 "PF is configured in %s mode with IP instance ID %d\n",
		 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
		 "primary" : "secondary", dev_p->nac_topo.id);

	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
		  dev_p->nac_topo.id);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse the device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2641 */ 2642 static void 2643 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2644 void *buf, u32 cap_count) 2645 { 2646 struct ice_aqc_list_caps_elem *cap_resp; 2647 u32 i; 2648 2649 cap_resp = buf; 2650 2651 memset(dev_p, 0, sizeof(*dev_p)); 2652 2653 for (i = 0; i < cap_count; i++) { 2654 u16 cap = le16_to_cpu(cap_resp[i].cap); 2655 bool found; 2656 2657 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2658 &cap_resp[i], "dev caps"); 2659 2660 switch (cap) { 2661 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2662 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2663 break; 2664 case ICE_AQC_CAPS_VF: 2665 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2666 break; 2667 case ICE_AQC_CAPS_VSI: 2668 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2669 break; 2670 case ICE_AQC_CAPS_1588: 2671 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2672 break; 2673 case ICE_AQC_CAPS_FD: 2674 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2675 break; 2676 case ICE_AQC_CAPS_SENSOR_READING: 2677 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2678 break; 2679 case ICE_AQC_CAPS_NAC_TOPOLOGY: 2680 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); 2681 break; 2682 default: 2683 /* Don't list common capabilities as unknown */ 2684 if (!found) 2685 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2686 i, cap); 2687 break; 2688 } 2689 } 2690 2691 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2692 } 2693 2694 /** 2695 * ice_is_phy_rclk_in_netlist 2696 * @hw: pointer to the hw struct 2697 * 2698 * Check if the PHY Recovered Clock device is present in the netlist 2699 */ 2700 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2701 { 2702 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, 2703 ICE_AQC_LINK_TOPO_NODE_CTX_PORT, 2704 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2705 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, 2706 ICE_AQC_LINK_TOPO_NODE_CTX_PORT, 2707 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2708 return false; 2709 2710 return true; 2711 } 2712 2713 /** 2714 * ice_is_clock_mux_in_netlist 2715 * @hw: pointer to the hw struct 2716 * 2717 * Check if the Clock Multiplexer device is present in the netlist 2718 */ 2719 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2720 { 2721 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2722 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2723 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2724 NULL)) 2725 return false; 2726 2727 return true; 2728 } 2729 2730 /** 2731 * ice_is_cgu_in_netlist - check for CGU presence 2732 * @hw: pointer to the hw struct 2733 * 2734 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2735 * Save the CGU part number in the hw structure for later use. 
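 *
 * A caller-side sketch (hypothetical debug print):
 *
 *	if (ice_is_cgu_in_netlist(hw))
 *		dev_dbg(ice_hw_to_dev(hw), "CGU part 0x%x present\n",
 *			hw->cgu_part_number);
 *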
2736 * Return: 2737 * * true - cgu is present 2738 * * false - cgu is not present 2739 */ 2740 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2741 { 2742 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2743 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2744 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2745 NULL)) { 2746 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2747 return true; 2748 } else if (!ice_find_netlist_node(hw, 2749 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2750 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2751 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2752 NULL)) { 2753 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2754 return true; 2755 } 2756 2757 return false; 2758 } 2759 2760 /** 2761 * ice_is_gps_in_netlist 2762 * @hw: pointer to the hw struct 2763 * 2764 * Check if the GPS generic device is present in the netlist 2765 */ 2766 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2767 { 2768 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2769 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2770 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2771 return false; 2772 2773 return true; 2774 } 2775 2776 /** 2777 * ice_aq_list_caps - query function/device capabilities 2778 * @hw: pointer to the HW struct 2779 * @buf: a buffer to hold the capabilities 2780 * @buf_size: size of the buffer 2781 * @cap_count: if not NULL, set to the number of capabilities reported 2782 * @opc: capabilities type to discover, device or function 2783 * @cd: pointer to command details structure or NULL 2784 * 2785 * Get the function (0x000A) or device (0x000B) capabilities description from 2786 * firmware and store it in the buffer. 2787 * 2788 * If the cap_count pointer is not NULL, then it is set to the number of 2789 * capabilities firmware will report. Note that if the buffer size is too 2790 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2791 * cap_count will still be updated in this case. It is recommended that the 2792 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2793 * firmware could return) to avoid this. 2794 */ 2795 int 2796 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2797 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2798 { 2799 struct ice_aqc_list_caps *cmd; 2800 struct ice_aq_desc desc; 2801 int status; 2802 2803 cmd = &desc.params.get_cap; 2804 2805 if (opc != ice_aqc_opc_list_func_caps && 2806 opc != ice_aqc_opc_list_dev_caps) 2807 return -EINVAL; 2808 2809 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2810 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2811 2812 if (cap_count) 2813 *cap_count = le32_to_cpu(cmd->count); 2814 2815 return status; 2816 } 2817 2818 /** 2819 * ice_discover_dev_caps - Read and extract device capabilities 2820 * @hw: pointer to the hardware structure 2821 * @dev_caps: pointer to device capabilities structure 2822 * 2823 * Read the device capabilities and extract them into the dev_caps structure 2824 * for later use. 2825 */ 2826 int 2827 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2828 { 2829 u32 cap_count = 0; 2830 void *cbuf; 2831 int status; 2832 2833 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2834 if (!cbuf) 2835 return -ENOMEM; 2836 2837 /* Although the driver doesn't know the number of capabilities the 2838 * device will return, we can simply send a 4KB buffer, the maximum 2839 * possible size that firmware can return. 
2840 */ 2841 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2842 2843 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2844 ice_aqc_opc_list_dev_caps, NULL); 2845 if (!status) 2846 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2847 kfree(cbuf); 2848 2849 return status; 2850 } 2851 2852 /** 2853 * ice_discover_func_caps - Read and extract function capabilities 2854 * @hw: pointer to the hardware structure 2855 * @func_caps: pointer to function capabilities structure 2856 * 2857 * Read the function capabilities and extract them into the func_caps structure 2858 * for later use. 2859 */ 2860 static int 2861 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2862 { 2863 u32 cap_count = 0; 2864 void *cbuf; 2865 int status; 2866 2867 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2868 if (!cbuf) 2869 return -ENOMEM; 2870 2871 /* Although the driver doesn't know the number of capabilities the 2872 * device will return, we can simply send a 4KB buffer, the maximum 2873 * possible size that firmware can return. 2874 */ 2875 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2876 2877 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2878 ice_aqc_opc_list_func_caps, NULL); 2879 if (!status) 2880 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2881 kfree(cbuf); 2882 2883 return status; 2884 } 2885 2886 /** 2887 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2888 * @hw: pointer to the hardware structure 2889 */ 2890 void ice_set_safe_mode_caps(struct ice_hw *hw) 2891 { 2892 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2893 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2894 struct ice_hw_common_caps cached_caps; 2895 u32 num_funcs; 2896 2897 /* cache some func_caps values that should be restored after memset */ 2898 cached_caps = func_caps->common_cap; 2899 2900 /* unset func capabilities */ 2901 memset(func_caps, 0, sizeof(*func_caps)); 2902 2903 #define ICE_RESTORE_FUNC_CAP(name) \ 2904 func_caps->common_cap.name = cached_caps.name 2905 2906 /* restore cached values */ 2907 ICE_RESTORE_FUNC_CAP(valid_functions); 2908 ICE_RESTORE_FUNC_CAP(txq_first_id); 2909 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2910 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2911 ICE_RESTORE_FUNC_CAP(max_mtu); 2912 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2913 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2914 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2915 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2916 2917 /* one Tx and one Rx queue in safe mode */ 2918 func_caps->common_cap.num_rxq = 1; 2919 func_caps->common_cap.num_txq = 1; 2920 2921 /* two MSIX vectors, one for traffic and one for misc causes */ 2922 func_caps->common_cap.num_msix_vectors = 2; 2923 func_caps->guar_num_vsi = 1; 2924 2925 /* cache some dev_caps values that should be restored after memset */ 2926 cached_caps = dev_caps->common_cap; 2927 num_funcs = dev_caps->num_funcs; 2928 2929 /* unset dev capabilities */ 2930 memset(dev_caps, 0, sizeof(*dev_caps)); 2931 2932 #define ICE_RESTORE_DEV_CAP(name) \ 2933 dev_caps->common_cap.name = cached_caps.name 2934 2935 /* restore cached values */ 2936 ICE_RESTORE_DEV_CAP(valid_functions); 2937 ICE_RESTORE_DEV_CAP(txq_first_id); 2938 ICE_RESTORE_DEV_CAP(rxq_first_id); 2939 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2940 ICE_RESTORE_DEV_CAP(max_mtu); 2941 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2942 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2943 
ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2944 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2945 dev_caps->num_funcs = num_funcs; 2946 2947 /* one Tx and one Rx queue per function in safe mode */ 2948 dev_caps->common_cap.num_rxq = num_funcs; 2949 dev_caps->common_cap.num_txq = num_funcs; 2950 2951 /* two MSIX vectors per function */ 2952 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2953 } 2954 2955 /** 2956 * ice_get_caps - get info about the HW 2957 * @hw: pointer to the hardware structure 2958 */ 2959 int ice_get_caps(struct ice_hw *hw) 2960 { 2961 int status; 2962 2963 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2964 if (status) 2965 return status; 2966 2967 return ice_discover_func_caps(hw, &hw->func_caps); 2968 } 2969 2970 /** 2971 * ice_aq_manage_mac_write - manage MAC address write command 2972 * @hw: pointer to the HW struct 2973 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2974 * @flags: flags to control write behavior 2975 * @cd: pointer to command details structure or NULL 2976 * 2977 * This function is used to write MAC address to the NVM (0x0108). 2978 */ 2979 int 2980 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2981 struct ice_sq_cd *cd) 2982 { 2983 struct ice_aqc_manage_mac_write *cmd; 2984 struct ice_aq_desc desc; 2985 2986 cmd = &desc.params.mac_write; 2987 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2988 2989 cmd->flags = flags; 2990 ether_addr_copy(cmd->mac_addr, mac_addr); 2991 2992 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2993 } 2994 2995 /** 2996 * ice_aq_clear_pxe_mode 2997 * @hw: pointer to the HW struct 2998 * 2999 * Tell the firmware that the driver is taking over from PXE (0x0110). 3000 */ 3001 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3002 { 3003 struct ice_aq_desc desc; 3004 3005 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3006 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3007 3008 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3009 } 3010 3011 /** 3012 * ice_clear_pxe_mode - clear pxe operations mode 3013 * @hw: pointer to the HW struct 3014 * 3015 * Make sure all PXE mode settings are cleared, including things 3016 * like descriptor fetch/write-back mode. 3017 */ 3018 void ice_clear_pxe_mode(struct ice_hw *hw) 3019 { 3020 if (ice_check_sq_alive(hw, &hw->adminq)) 3021 ice_aq_clear_pxe_mode(hw); 3022 } 3023 3024 /** 3025 * ice_aq_set_port_params - set physical port parameters. 
3026 * @pi: pointer to the port info struct 3027 * @double_vlan: if set double VLAN is enabled 3028 * @cd: pointer to command details structure or NULL 3029 * 3030 * Set Physical port parameters (0x0203) 3031 */ 3032 int 3033 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3034 struct ice_sq_cd *cd) 3035 3036 { 3037 struct ice_aqc_set_port_params *cmd; 3038 struct ice_hw *hw = pi->hw; 3039 struct ice_aq_desc desc; 3040 u16 cmd_flags = 0; 3041 3042 cmd = &desc.params.set_port_params; 3043 3044 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3045 if (double_vlan) 3046 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3047 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3048 3049 cmd->local_fwd_mode = pi->local_fwd_mode | 3050 ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID; 3051 3052 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3053 } 3054 3055 /** 3056 * ice_is_100m_speed_supported 3057 * @hw: pointer to the HW struct 3058 * 3059 * returns true if 100M speeds are supported by the device, 3060 * false otherwise. 3061 */ 3062 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3063 { 3064 switch (hw->device_id) { 3065 case ICE_DEV_ID_E822C_SGMII: 3066 case ICE_DEV_ID_E822L_SGMII: 3067 case ICE_DEV_ID_E823L_1GBE: 3068 case ICE_DEV_ID_E823C_SGMII: 3069 return true; 3070 default: 3071 return false; 3072 } 3073 } 3074 3075 /** 3076 * ice_get_link_speed_based_on_phy_type - returns link speed 3077 * @phy_type_low: lower part of phy_type 3078 * @phy_type_high: higher part of phy_type 3079 * 3080 * This helper function will convert an entry in PHY type structure 3081 * [phy_type_low, phy_type_high] to its corresponding link speed. 3082 * Note: In the structure of [phy_type_low, phy_type_high], there should 3083 * be one bit set, as this function will convert one PHY type to its 3084 * speed. 
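 *
 * For example, a phy_type_low of ICE_PHY_TYPE_LOW_25GBASE_SR with a zero
 * phy_type_high resolves to ICE_AQ_LINK_SPEED_25GB.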
3085 * 3086 * Return: 3087 * * PHY speed for recognized PHY type 3088 * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3089 * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3090 */ 3091 u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3092 { 3093 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3094 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3095 3096 switch (phy_type_low) { 3097 case ICE_PHY_TYPE_LOW_100BASE_TX: 3098 case ICE_PHY_TYPE_LOW_100M_SGMII: 3099 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3100 break; 3101 case ICE_PHY_TYPE_LOW_1000BASE_T: 3102 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3103 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3104 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3105 case ICE_PHY_TYPE_LOW_1G_SGMII: 3106 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3107 break; 3108 case ICE_PHY_TYPE_LOW_2500BASE_T: 3109 case ICE_PHY_TYPE_LOW_2500BASE_X: 3110 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3111 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3112 break; 3113 case ICE_PHY_TYPE_LOW_5GBASE_T: 3114 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3115 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3116 break; 3117 case ICE_PHY_TYPE_LOW_10GBASE_T: 3118 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3119 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3120 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3121 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3122 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3123 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3124 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3125 break; 3126 case ICE_PHY_TYPE_LOW_25GBASE_T: 3127 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3128 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3129 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3130 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3131 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3132 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3133 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3134 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3135 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3136 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3137 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3138 break; 3139 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3140 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3141 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3142 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3143 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3144 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3145 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3146 break; 3147 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3148 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3149 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3150 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3151 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3152 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3153 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3154 case ICE_PHY_TYPE_LOW_50G_AUI2: 3155 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3156 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3157 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3158 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3159 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3160 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3161 case ICE_PHY_TYPE_LOW_50G_AUI1: 3162 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3163 break; 3164 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3165 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3166 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3167 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3168 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3169 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3170 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3171 case ICE_PHY_TYPE_LOW_100G_AUI4: 3172 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3173 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3174 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3175 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3176 case ICE_PHY_TYPE_LOW_100GBASE_DR: 
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_SR4:
	case ICE_PHY_TYPE_HIGH_200G_FR4:
	case ICE_PHY_TYPE_HIGH_200G_LR4:
	case ICE_PHY_TYPE_HIGH_200G_DR4:
	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_200G_AUI4:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap format, see the link_speed field of
 * struct ice_aqc_get_link_status. The caller may pass a link_speeds_bitmap
 * that includes multiple speeds.
 *
 * Each bit in the [phy_type_low, phy_type_high] pair represents a certain
 * link speed. This helper function turns on the bits in
 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
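 *
 * A typical flow (a sketch mirroring ice_set_fc() further below; pcaps
 * and cfg locals assumed): read the active capabilities, copy them into
 * a configuration, adjust it, then apply:
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);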
3274 */ 3275 int 3276 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3277 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3278 { 3279 struct ice_aq_desc desc; 3280 int status; 3281 3282 if (!cfg) 3283 return -EINVAL; 3284 3285 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3286 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3287 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3288 cfg->caps); 3289 3290 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3291 } 3292 3293 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3294 desc.params.set_phy.lport_num = pi->lport; 3295 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3296 3297 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3298 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3299 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3300 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3301 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3302 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3303 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3304 cfg->low_power_ctrl_an); 3305 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3306 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3307 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3308 cfg->link_fec_opt); 3309 3310 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3311 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3312 status = 0; 3313 3314 if (!status) 3315 pi->phy.curr_user_phy_cfg = *cfg; 3316 3317 return status; 3318 } 3319 3320 /** 3321 * ice_update_link_info - update status of the HW network link 3322 * @pi: port info structure of the interested logical port 3323 */ 3324 int ice_update_link_info(struct ice_port_info *pi) 3325 { 3326 struct ice_link_status *li; 3327 int status; 3328 3329 if (!pi) 3330 return -EINVAL; 3331 3332 li = &pi->phy.link_info; 3333 3334 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3335 if (status) 3336 return status; 3337 3338 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3339 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3340 3341 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3342 if (!pcaps) 3343 return -ENOMEM; 3344 3345 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3346 pcaps, NULL); 3347 } 3348 3349 return status; 3350 } 3351 3352 /** 3353 * ice_aq_get_phy_equalization - function to read serdes equaliser 3354 * value from firmware using admin queue command. 3355 * @hw: pointer to the HW struct 3356 * @data_in: represents the serdes equalization parameter requested 3357 * @op_code: represents the serdes number and flag to represent tx or rx 3358 * @serdes_num: represents the serdes number 3359 * @output: pointer to the caller-supplied buffer to return serdes equaliser 3360 * 3361 * Return: non-zero status on error and 0 on success. 
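 *
 * Note that *output is forced to 0 whenever the DNL call fails, so the
 * caller always receives a defined value.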
3362 */ 3363 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, 3364 u8 serdes_num, int *output) 3365 { 3366 struct ice_aqc_dnl_call_command *cmd; 3367 struct ice_aqc_dnl_call buf = {}; 3368 struct ice_aq_desc desc; 3369 int err; 3370 3371 buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); 3372 buf.sto.txrx_equa_reqs.op_code_serdes_sel = 3373 cpu_to_le16(op_code | (serdes_num & 0xF)); 3374 cmd = &desc.params.dnl_call; 3375 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); 3376 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | 3377 ICE_AQ_FLAG_RD | 3378 ICE_AQ_FLAG_SI); 3379 desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); 3380 cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); 3381 3382 err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), 3383 NULL); 3384 *output = err ? 0 : buf.sto.txrx_equa_resp.val; 3385 3386 return err; 3387 } 3388 3389 #define FEC_REG_PORT(port) { \ 3390 FEC_CORR_LOW_REG_PORT##port, \ 3391 FEC_CORR_HIGH_REG_PORT##port, \ 3392 FEC_UNCORR_LOW_REG_PORT##port, \ 3393 FEC_UNCORR_HIGH_REG_PORT##port, \ 3394 } 3395 3396 static const u32 fec_reg[][ICE_FEC_MAX] = { 3397 FEC_REG_PORT(0), 3398 FEC_REG_PORT(1), 3399 FEC_REG_PORT(2), 3400 FEC_REG_PORT(3) 3401 }; 3402 3403 /** 3404 * ice_aq_get_fec_stats - reads fec stats from phy 3405 * @hw: pointer to the HW struct 3406 * @pcs_quad: represents pcsquad of user input serdes 3407 * @pcs_port: represents the pcs port number part of above pcs quad 3408 * @fec_type: represents FEC stats type 3409 * @output: pointer to the caller-supplied buffer to return requested fec stats 3410 * 3411 * Return: non-zero status on error and 0 on success. 3412 */ 3413 int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, 3414 enum ice_fec_stats_types fec_type, u32 *output) 3415 { 3416 u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); 3417 struct ice_sbq_msg_input msg = {}; 3418 u32 receiver_id, reg_offset; 3419 int err; 3420 3421 if (pcs_port > 3) 3422 return -EINVAL; 3423 3424 reg_offset = fec_reg[pcs_port][fec_type]; 3425 3426 if (pcs_quad == 0) 3427 receiver_id = FEC_RECEIVER_ID_PCS0; 3428 else if (pcs_quad == 1) 3429 receiver_id = FEC_RECEIVER_ID_PCS1; 3430 else 3431 return -EINVAL; 3432 3433 msg.msg_addr_low = lower_16_bits(reg_offset); 3434 msg.msg_addr_high = receiver_id; 3435 msg.opcode = ice_sbq_msg_rd; 3436 msg.dest_dev = rmn_0; 3437 3438 err = ice_sbq_rw_reg(hw, &msg, flag); 3439 if (err) 3440 return err; 3441 3442 *output = msg.data; 3443 return 0; 3444 } 3445 3446 /** 3447 * ice_cache_phy_user_req 3448 * @pi: port information structure 3449 * @cache_data: PHY logging data 3450 * @cache_mode: PHY logging mode 3451 * 3452 * Log the user request on (FC, FEC, SPEED) for later use. 
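 *
 * A caching sketch, as used by ice_cfg_phy_fc() below:
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);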
3453 */ 3454 static void 3455 ice_cache_phy_user_req(struct ice_port_info *pi, 3456 struct ice_phy_cache_mode_data cache_data, 3457 enum ice_phy_cache_mode cache_mode) 3458 { 3459 if (!pi) 3460 return; 3461 3462 switch (cache_mode) { 3463 case ICE_FC_MODE: 3464 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3465 break; 3466 case ICE_SPEED_MODE: 3467 pi->phy.curr_user_speed_req = 3468 cache_data.data.curr_user_speed_req; 3469 break; 3470 case ICE_FEC_MODE: 3471 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3472 break; 3473 default: 3474 break; 3475 } 3476 } 3477 3478 /** 3479 * ice_caps_to_fc_mode 3480 * @caps: PHY capabilities 3481 * 3482 * Convert PHY FC capabilities to ice FC mode 3483 */ 3484 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3485 { 3486 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3487 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3488 return ICE_FC_FULL; 3489 3490 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3491 return ICE_FC_TX_PAUSE; 3492 3493 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3494 return ICE_FC_RX_PAUSE; 3495 3496 return ICE_FC_NONE; 3497 } 3498 3499 /** 3500 * ice_caps_to_fec_mode 3501 * @caps: PHY capabilities 3502 * @fec_options: Link FEC options 3503 * 3504 * Convert PHY FEC capabilities to ice FEC mode 3505 */ 3506 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3507 { 3508 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3509 return ICE_FEC_AUTO; 3510 3511 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3512 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3513 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3514 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3515 return ICE_FEC_BASER; 3516 3517 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3518 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3519 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3520 return ICE_FEC_RS; 3521 3522 return ICE_FEC_NONE; 3523 } 3524 3525 /** 3526 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3527 * @pi: port information structure 3528 * @cfg: PHY configuration data to set FC mode 3529 * @req_mode: FC mode to configure 3530 */ 3531 int 3532 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3533 enum ice_fc_mode req_mode) 3534 { 3535 struct ice_phy_cache_mode_data cache_data; 3536 u8 pause_mask = 0x0; 3537 3538 if (!pi || !cfg) 3539 return -EINVAL; 3540 3541 switch (req_mode) { 3542 case ICE_FC_FULL: 3543 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3544 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3545 break; 3546 case ICE_FC_RX_PAUSE: 3547 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3548 break; 3549 case ICE_FC_TX_PAUSE: 3550 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3551 break; 3552 default: 3553 break; 3554 } 3555 3556 /* clear the old pause settings */ 3557 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3558 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3559 3560 /* set the new capabilities */ 3561 cfg->caps |= pause_mask; 3562 3563 /* Cache user FC request */ 3564 cache_data.data.curr_user_fc_req = req_mode; 3565 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3566 3567 return 0; 3568 } 3569 3570 /** 3571 * ice_set_fc 3572 * @pi: port information structure 3573 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3574 * @ena_auto_link_update: enable automatic link update 3575 * 3576 * Set the requested flow control mode. 
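 *
 * A caller sketch (the FC request is taken from the port info):
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);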
/**
 * ice_set_fc - set the requested flow control mode
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	return status;
}

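/* Example (illustrative sketch): requesting full flow control with an
 * automatic link restart, then mapping a failure back to the AQ stage that
 * reported it. pi is assumed valid; dev is a hypothetical struct device
 * pointer for the debug print.
 *
 *	u8 aq_failures;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err && aq_failures == ICE_SET_FC_AQ_FAIL_SET)
 *		dev_dbg(dev, "setting PHY config failed\n");
 */
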
/**
 * ice_phy_caps_equals_cfg - check if PHY capabilities match PHY configuration
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

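/* Example (illustrative sketch): the usual pattern for changing a single PHY
 * setting is read-modify-write through this helper, so every other field of
 * the set-config command stays consistent with what firmware reported.
 * ICE_AQ_PHY_ENA_LINK is shown only as a representative caps flag.
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_LINK;
 *	err = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
 */
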
/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear the RS bits: keep only the BASE-R ability bits,
		 * then OR in the BASE-R request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear the BASE-R bits: keep only the RS ability bit,
		 * then OR in the RS request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* Keep only valid caps bits (including auto FEC) and OR in
		 * all FEC ability bits.
		 */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * *link_up is set true if the link is up and false if it is down; its value
 * is invalid if the return status is non-zero. As a side effect of this
 * call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

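/* Example (illustrative sketch): a one-shot link check after bringing the
 * port up. The bool is only meaningful when the call succeeds; netdev is a
 * hypothetical net_device owned by the caller.
 *
 *	bool link_up;
 *	int err;
 *
 *	err = ice_get_link_status(pi, &link_up);
 *	if (!err && link_up)
 *		netif_carrier_on(netdev);
 */
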
/**
 * ice_aq_set_link_restart_an - set up the link and restart AN
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask - set event mask (0x0613)
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback - enable/disable MAC loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led - set LED value for the given port (0x06e9)
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

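/* Example (illustrative sketch): an "identify port" flow, such as the one
 * ethtool triggers, would blink the LED while identification is active and
 * restore the netlist default afterwards:
 *
 *	ice_aq_set_port_id_led(pi, false, NULL);	// start blinking
 *	// ...user locates the port...
 *	ice_aq_set_port_id_led(pi, true, NULL);		// restore original mode
 */
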
/**
 * ice_aq_get_port_options - get port options (0x06ea)
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *                output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *               lport; when the PF owns more than one port, it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}

/**
 * ice_aq_set_port_option - set port option (0x06eb)
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *               lport; when the PF owns more than one port, it must be true
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_get_phy_lane_number - Get PHY lane number for current adapter
 * @hw: pointer to the hw struct
 *
 * Return: PHY lane number on success, negative error code otherwise.
 */
int ice_get_phy_lane_number(struct ice_hw *hw)
{
	struct ice_aqc_get_port_options_elem *options;
	unsigned int lport = 0;
	unsigned int lane;
	int err;

	options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
		u8 options_count = ICE_AQC_PORT_OPT_MAX;
		u8 speed, active_idx, pending_idx;
		bool active_valid, pending_valid;

		err = ice_aq_get_port_options(hw, options, &options_count, lane,
					      true, &active_idx, &active_valid,
					      &pending_idx, &pending_valid);
		if (err)
			goto err;

		if (!active_valid)
			continue;

		speed = options[active_idx].max_lane_speed;
		/* If we don't get a speed for this lane, it's unoccupied */
		if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
			continue;

		if (hw->pf_id == lport) {
			kfree(options);
			return lane;
		}

		lport++;
	}

	/* PHY lane not found */
	err = -ENXIO;
err:
	kfree(options);
	return err;
}

/**
 * ice_aq_sff_eeprom - read/write SFF EEPROM (0x06EE)
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0 = topo default)
 * @mem_addr: I2C offset; the lower 8 bits hold the address, the upper 8 bits
 *	      must be zero
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device
 * @length: 1-16 for read, 1 for write
 * @write: false for read, true for write
 * @cd: pointer to command details structure or NULL
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	u16 i2c_bus_addr;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
	if (write)
		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

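/* Example (illustrative sketch): reading the first 16 identification bytes
 * of an SFP module at the conventional 0xA0 EEPROM address, page 0. Per the
 * @lport encoding above, 0x100 sets the "logical port valid" bit with
 * logical port 0:
 *
 *	u8 id[16];
 *	int err;
 *
 *	err = ice_aq_sff_eeprom(hw, 0x100, 0xA0, 0, 0, 0, id,
 *				sizeof(id), false, NULL);
 */
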
passed"); 4168 return 0; 4169 } 4170 4171 /** 4172 * __ice_aq_get_set_rss_lut 4173 * @hw: pointer to the hardware structure 4174 * @params: RSS LUT parameters 4175 * @set: set true to set the table, false to get the table 4176 * 4177 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4178 */ 4179 static int 4180 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 4181 struct ice_aq_get_set_rss_lut_params *params, bool set) 4182 { 4183 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 4184 enum ice_lut_type lut_type = params->lut_type; 4185 struct ice_aqc_get_set_rss_lut *desc_params; 4186 enum ice_aqc_lut_flags flags; 4187 enum ice_lut_size lut_size; 4188 struct ice_aq_desc desc; 4189 u8 *lut = params->lut; 4190 4191 4192 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4193 return -EINVAL; 4194 4195 lut_size = ice_lut_type_to_size(lut_type); 4196 if (lut_size > params->lut_size) 4197 return -EINVAL; 4198 else if (set && lut_size != params->lut_size) 4199 return -EINVAL; 4200 4201 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4202 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4203 if (set) 4204 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4205 4206 desc_params = &desc.params.get_set_rss_lut; 4207 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4208 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4209 4210 if (lut_type == ICE_LUT_GLOBAL) 4211 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4212 params->global_lut_id); 4213 4214 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4215 desc_params->flags = cpu_to_le16(flags); 4216 4217 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4218 } 4219 4220 /** 4221 * ice_aq_get_rss_lut 4222 * @hw: pointer to the hardware structure 4223 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4224 * 4225 * get the RSS lookup table, PF or VSI type 4226 */ 4227 int 4228 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4229 { 4230 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4231 } 4232 4233 /** 4234 * ice_aq_set_rss_lut 4235 * @hw: pointer to the hardware structure 4236 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4237 * 4238 * set the RSS lookup table, PF or VSI type 4239 */ 4240 int 4241 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4242 { 4243 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4244 } 4245 4246 /** 4247 * __ice_aq_get_set_rss_key 4248 * @hw: pointer to the HW struct 4249 * @vsi_id: VSI FW index 4250 * @key: pointer to key info struct 4251 * @set: set true to set the key, false to get the key 4252 * 4253 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4254 */ 4255 static int 4256 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4257 struct ice_aqc_get_set_rss_keys *key, bool set) 4258 { 4259 struct ice_aqc_get_set_rss_key *desc_params; 4260 u16 key_size = sizeof(*key); 4261 struct ice_aq_desc desc; 4262 4263 if (set) { 4264 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4265 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4266 } else { 4267 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4268 } 4269 4270 desc_params = &desc.params.get_set_rss_key; 4271 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4272 4273 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4274 } 4275 4276 /** 4277 * ice_aq_get_rss_key 4278 * @hw: pointer to the HW struct 4279 * 
/**
 * ice_aq_get_rss_key - get the RSS key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key - set the RSS key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq - add Tx LAN queue (0x0C30)
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

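/* Example (illustrative sketch): the Add Tx LAN queue buffer is a packed
 * chain of variable-length groups, which is why the size check above walks
 * the list with struct_size(). A single-group, single-queue buffer would be
 * sized and initialized like this:
 *
 *	struct ice_aqc_add_tx_qgrp *qg;
 *	u16 size = struct_size(qg, txqs, 1);
 *
 *	qg = kzalloc(size, GFP_KERNEL);
 *	if (!qg)
 *		return -ENOMEM;
 *	qg->num_txqs = 1;
 */
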
/**
 * ice_aq_dis_lan_txq - disable LAN Tx queue (0x0C31)
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 vmvf_and_timeout;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_cfg_lan_txq - move/configure LAN Tx queue (0x0C32)
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * There is a better AQ command to use for moving nodes, so only coding
 * this one for configuring the node.
 */
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_txqs *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.cfg_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (!buf)
		return -EINVAL;

	cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
	cmd->num_qs = num_qs;
	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
	cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
	cmd->blocked_cgds = 0;

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
			  hw->adminq.sq_last_status);
	return status;
}

/**
 * ice_aq_add_rdma_qsets - add Tx RDMA Qsets (0x0C33)
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq - add a LAN Tx queue
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue.
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq - disable LAN Tx queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB.
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queue is already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
		q_ctx->q_teid = ICE_INVAL_TEID;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset - add RDMA Qsets
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets.
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

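/* Example (illustrative sketch): programming CGU input pin 0 for a 10 MHz
 * reference with no extra phase compensation. The flags1/flags2 values are
 * device-specific; zeros are shown only as placeholders:
 *
 *	int err = ice_aq_set_input_pin_cfg(hw, 0, 0, 0, 10000000, 0);
 */
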
/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}

/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: current DPLL config
 * @dpll_state: current DPLL state
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = sign_extend64(*phase_offset, 47);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

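/* Example (illustrative sketch): polling DPLL 0 and consuming the
 * sign-extended 48-bit phase offset produced above. The outputs are only
 * valid when the call returns 0; dev is a hypothetical struct device
 * pointer:
 *
 *	u8 ref_state, dpll_state, config, eec_mode;
 *	s64 phase_offset;
 *	int err;
 *
 *	err = ice_aq_get_cgu_dpll_status(hw, 0, &ref_state, &dpll_state,
 *					 &config, &phase_offset, &eec_mode);
 *	if (!err)
 *		dev_dbg(dev, "dpll0 state %u, raw phase offset %lld\n",
 *			dpll_state, phase_offset);
 */
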
/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set PHY recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}

/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: PHY node handle
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading - get sensor reading (0x0632)
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list, which
	 * is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

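/* Example (illustrative sketch): after a reset, replay must start with the
 * main VSI so that pre-init (rule list swap and aggregator pre-init) runs
 * exactly once before any other VSI is restored:
 *
 *	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	// ...replay the remaining VSI handles...
 *	ice_replay_post(hw);
 */
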
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
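/* Worked example of the roll-over handling above (illustrative numbers,
 * 40-bit counter): if *prev_stat is 0xFFFFFFFFF0 and the register now reads
 * 0x10, the counter wrapped, so the true delta is
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20 rather than a huge bogus value.
 * ice_stat_update32() below applies the same logic with BIT_ULL(32).
 */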
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to store the queried element information
 *
 * This function queries HW element information.
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read from an I2C device on the netlist topology
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] - data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
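/* Illustrative sketch (hypothetical values): read 2 bytes starting at I2C
 * offset 0x0 from a device at 7-bit bus address 0x50 described by a
 * previously filled link_topo. The params byte packs the fields the
 * kernel-doc above describes; here only the data size bits are set.
 *
 *	u8 buf[2];
 *	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, sizeof(buf));
 *	int err;
 *
 *	err = ice_aq_read_i2c(hw, link_topo, 0x50, cpu_to_le16(0x0),
 *			      params, buf, NULL);
 */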
/**
 * ice_aq_write_i2c - write to an I2C device on the netlist topology
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *			    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_get_pca9575_handle - find and return the PCA9575 controller
 * @hw: pointer to the hw struct
 * @pca9575_handle: GPIO controller's handle
 *
 * Find and return the GPIO controller's handle in the netlist. Once found,
 * the value is cached in the hw structure and subsequent calls return the
 * cached value.
 *
 * Return: 0 on success, -ENXIO when there's no PCA9575 present.
 */
int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;
	int err;
	u8 idx;

	/* If the handle was read previously, return the cached value */
	if (hw->io_expander_handle) {
		*pca9575_handle = hw->io_expander_handle;
		return 0;
	}

#define SW_PCA9575_SFP_TOPO_IDX		2
#define SW_PCA9575_QSFP_TOPO_IDX	1

	/* Check if the SW IO expander controlling SMA exists in the netlist. */
	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
		idx = SW_PCA9575_SFP_TOPO_IDX;
	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
		idx = SW_PCA9575_QSFP_TOPO_IDX;
	else
		return -ENXIO;

	/* If the handle was not cached, read it from the netlist */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	cmd = &desc.params.get_link_topo;
	cmd->addr.topo_params.node_type_ctx =
		ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
	cmd->addr.topo_params.index = idx;

	err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (err)
		return -ENXIO;

	/* Verify if we found the right IO expander type */
	if (desc.params.get_link_topo.node_part_num !=
	    ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
		return -ENXIO;

	/* If present save the handle and return it */
	hw->io_expander_handle =
		le16_to_cpu(desc.params.get_link_topo.addr.handle);
	*pca9575_handle = hw->io_expander_handle;

	return 0;
}
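/* Illustrative sketch (hypothetical pin index): resolve the expander handle
 * once, then drive one of its IO pins through the GPIO AQ command defined
 * later in this file.
 *
 *	u16 handle;
 *
 *	if (!ice_get_pca9575_handle(hw, &handle))
 *		ice_aq_set_gpio(hw, handle, 7, true, NULL);
 */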
/**
 * ice_read_pca9575_reg - read the register from the PCA9575 controller
 * @hw: pointer to the hw struct
 * @offset: GPIO controller register offset
 * @data: pointer to data to be read from the GPIO controller
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
{
	struct ice_aqc_link_topo_addr link_topo;
	__le16 addr;
	u16 handle;
	int err;

	memset(&link_topo, 0, sizeof(link_topo));

	err = ice_get_pca9575_handle(hw, &handle);
	if (err)
		return err;

	link_topo.handle = cpu_to_le16(handle);
	link_topo.topo_params.node_type_ctx =
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
			   ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);

	addr = cpu_to_le16((u16)offset);

	/* params = 1: read a single data byte */
	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - read GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver - check that the FW API meets a minimum version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least maj.min.patch.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override.
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
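/* Worked example of the comparison above (illustrative numbers): with
 * maj/min/patch = 1/7/9, an API version of 1.7.9 or 1.8.0 returns true,
 * 1.7.8 returns false, and any 2.x.y returns true because only the major
 * version is compared once it exceeds the requested one.
 */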
/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port.
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
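/* Worked example of the 16-bit word assembly above (illustrative values):
 * if the phy_type_low words read from the PFA are 0x1111, 0x2222, 0x3333
 * and 0x4444 in that order, the resulting 64-bit value is
 * 0x4444333322221111, i.e. word i lands at bit offset i * 16.
 */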
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_is_fw_health_report_supported - checks if firmware supports health events
 * @hw: pointer to the hardware structure
 *
 * Return: true if firmware supports health status reports,
 * false otherwise
 */
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
				     ICE_FW_API_HEALTH_REPORT_MIN,
				     ICE_FW_API_HEALTH_REPORT_PATCH);
}

/**
 * ice_aq_set_health_status_cfg - Configure FW health events
 * @hw: pointer to the HW struct
 * @event_source: type of diagnostic events to enable
 *
 * Configure the health status event types that the firmware will send to this
 * PF. The supported event types are: PF-specific, all PFs, and global.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
{
	struct ice_aqc_set_health_status_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_health_status_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);

	cmd->event_source = event_source;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
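/* Illustrative sketch (hypothetical vsi_num): gate the filter behind the FW
 * capability check before adding it, mirroring how callers are expected to
 * pair the two helpers above.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		ice_lldp_fltr_add_remove(hw, vsi_num, true);
 */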
/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration.
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches the speed of a return value
 * from the list of AQ link speeds, in the range ICE_AQ_LINK_SPEED_10MB ..
 * ICE_AQ_LINK_SPEED_200GB, excluding ICE_AQ_LINK_SPEED_UNKNOWN (BIT(15)).
 * The link_speed returned by the firmware is a 16 bit value, and the array
 * is indexed by [fls(speed) - 1].
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Return: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
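/* Illustrative use (hypothetical input): convert an AQ link_speed bit to an
 * integer Mbps value via fls(), matching the [fls(speed) - 1] indexing
 * described above. For ICE_AQ_LINK_SPEED_25GB (BIT(7)), fls() returns 8,
 * so index 7 yields SPEED_25000.
 *
 *	u32 mbps = ice_get_link_speed(fls(link_speed) - 1);
 */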