// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
#include <linux/packing.h>

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
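/*
 * Usage sketch (illustrative only; the mask below is made up for the
 * example): per the string tables above, dumping a phy_type_low value with
 * bits 12 and 14 set would log "10GBASE_T" and "10GBASE_SR".
 *
 *	ice_dump_phy_type(hw, BIT_ULL(12) | BIT_ULL(14), 0, "example");
 */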
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830CC_BACKPLANE:
	case ICE_DEV_ID_E830CC_QSFP56:
	case ICE_DEV_ID_E830CC_SFP:
	case ICE_DEV_ID_E830CC_SFP_DD:
	case ICE_DEV_ID_E830C_BACKPLANE:
	case ICE_DEV_ID_E830_XXV_BACKPLANE:
	case ICE_DEV_ID_E830C_QSFP:
	case ICE_DEV_ID_E830_XXV_QSFP:
	case ICE_DEV_ID_E830C_SFP:
	case ICE_DEV_ID_E830_XXV_SFP:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810 - check if a device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if a device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}
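/*
 * Usage sketch (illustrative only): callers typically branch on these
 * family helpers rather than on raw device IDs, e.g.:
 *
 *	if (ice_is_e810(hw))
 *		...		// E810-specific handling
 *	else if (ice_is_generic_mac(hw))
 *		...		// E822/E823/E825 (SBQ-capable) handling
 */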
/**
 * ice_is_e822 - Check if a device is E822 family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E822 based, false if not.
 */
bool ice_is_e822(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e823 - Check if a device is E823 family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_pf_c827 - check if pf contains c827 phy
 * @hw: pointer to the hw struct
 *
 * Return: true if the device has c827 phy.
 */
static bool ice_is_pf_c827(struct ice_hw *hw)
{
	struct ice_aqc_get_link_topo cmd = {};
	u8 node_part_number;
	u16 node_handle;
	int status;

	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
		return true;

	cmd.addr.topo_params.node_type_ctx =
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
	cmd.addr.topo_params.index = 0;

	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
					 &node_handle);

	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
		return false;

	if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
		return true;

	return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as
 * an array of "manage_mac_read" response structures.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
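/*
 * Usage sketch (illustrative only; mirrors the call made from ice_init_hw()
 * below): since a single port can report up to two (LAN and WoL) addresses,
 * the response buffer is sized for two entries.
 *
 *	void *buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
 *			    GFP_KERNEL);
 *
 *	if (buf)
 *		ice_aq_manage_mac_read(hw, buf,
 *				       2 * sizeof(struct ice_aqc_manage_mac_read_resp),
 *				       NULL);
 */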
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
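/*
 * Usage sketch (illustrative only): the capabilities structure is large, so
 * callers allocate it from the heap, as ice_init_hw() does below.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps) {
 *		ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				    pcaps, NULL);
 *		kfree(pcaps);
 *	}
 */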
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node - get a netlist node handle
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}
/**
 * ice_find_netlist_node - scan the netlist for a given node
 * @hw: pointer to the hw struct
 * @node_type: type of netlist node to look for
 * @ctx: context of the search
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Return:
 * * 0 if the node is found,
 * * -ENOENT if no handle was found,
 * * negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
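/*
 * Usage sketch (illustrative only): scan the netlist for a PHY node in the
 * port context and pick up its handle, similar to what ice_is_pf_c827()
 * above does with the raw AQ command.
 *
 *	u16 handle;
 *	int err;
 *
 *	err = ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
 *				    ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
 *				    ICE_AQC_GET_LINK_TOPO_NODE_NR_C827,
 *				    &handle);
 *	if (!err)
 *		...		// node found, handle is valid
 */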
/**
 * ice_is_media_cage_present - check if a media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
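/*
 * Classification sketch (illustrative only): with the mapping above, a link
 * reporting ICE_PHY_TYPE_LOW_10GBASE_SR resolves to ICE_MEDIA_FIBER,
 * ICE_PHY_TYPE_LOW_10GBASE_T to ICE_MEDIA_BASET, and the AUI/CAUI C2C types
 * resolve to ICE_MEDIA_DA or ICE_MEDIA_BACKPLANE depending on whether a
 * media cage is present.
 */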
/**
 * ice_get_link_status_datalen - get Get Link Status data length
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for the newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
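/*
 * Usage sketch (illustrative only): enable Link Status Event reporting
 * while fetching the current link state; the result is cached in
 * pi->phy.link_info and optionally copied out to the caller.
 *
 *	struct ice_link_status link;
 *	int err;
 *
 *	err = ice_aq_get_link_info(pi, true, &link, NULL);
 *	if (!err && (link.link_info & ICE_AQ_LINK_UP))
 *		...		// link is up
 */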
/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC		E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR	E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg - set MAC configuration
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
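/*
 * Usage sketch (illustrative only; mirrors the call from ice_init_hw()
 * below): enable jumbo frame support at the MAC level by programming the
 * maximum supported frame size.
 *
 *	ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */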
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	/* Initialize recipe count with default recipes read from NVM */
	sw->recp_cnt = ICE_SW_LKUP_LAST;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularities
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 *
 * Return: 0 on success, -ETIMEDOUT on timeout.
 */
static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{
	int fw_loading;
	u32 elapsed = 0;

	while (elapsed <= timeout) {
		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;

		/* firmware was not yet loaded, we have to wait more */
		if (fw_loading) {
			elapsed += 100;
			msleep(100);
			continue;
		}
		return 0;
	}

	return -ETIMEDOUT;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	/* Some cards require longer initialization times
	 * due to necessity of loading FW from an external source.
	 * This can take even half a minute.
	 */
	if (ice_is_pf_c827(hw)) {
		status = ice_wait_for_fw(hw, 30000);
		if (status) {
			dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
			goto err_unroll_fltr_mgmt_struct;
		}
	}

	return 0;
err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
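/*
 * Pairing sketch (illustrative only): a probe/remove flow brackets device
 * setup with these two entry points.
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;
 *	...
 *	ice_deinit_hw(hw);	// on remove, during nominal operation
 */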
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
				   const ice_rxq_ctx_buf_t *rxq_ctx,
				   u32 rxq_index)
{
	/* Copy each dword separately to HW */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		u32 ctx = ((const u32 *)rxq_ctx)[i];

		wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
	}
}

#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
	PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)

/* LAN Rx Queue Context */
static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
				 /* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
};

/**
 * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
 * @ctx: the Rx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
			     ice_rxq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

/**
 * ice_write_rxq_ctx - Write Rx Queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the unpacked Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Pack the sparse Rx Queue context into dense hardware format and write it
 * into the HW register space.
 *
 * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	ice_rxq_ctx_buf_t buf = {};

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	ice_pack_rxq_ctx(rlan_ctx, &buf);
	ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);

	return 0;
}
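/*
 * Usage sketch (illustrative only; ring_dma, ring_count and rx_buf_len are
 * hypothetical caller variables, and the 128-byte units are an assumption
 * about the HW encoding): fill the unpacked context, then let
 * ice_write_rxq_ctx() pack it into the dense HW layout and program the
 * QRX_CONTEXT registers.
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;		// base address in 128B units
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// data buffer in 128B units
 *	ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */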
/* LAN Tx Queue Context */
static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
				 /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
};

/**
 * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
 * @ctx: the Tx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 * @flags: control queue descriptor flags
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(flags);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
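/*
 * Usage sketch (illustrative only; the device and address below are
 * hypothetical): a zero opcode requests a read, in which case the value
 * comes back in the completion and is copied into in->data.
 *
 *	struct ice_sbq_msg_input in = {};
 *
 *	in.dest_dev = ...;		// target sideband device
 *	in.opcode = 0;			// 0 == read; non-zero == write
 *	in.msg_addr_low = 0x1234;	// hypothetical register address
 *	if (!ice_sbq_rw_reg(hw, &in, ICE_AQ_FLAG_RD))
 *		...			// in.data now holds the value read
 */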
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd - check if the send command should be retried
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_set_tx_topo:
	case ice_aqc_opc_get_tx_topo:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
1846 */ 1847 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1848 { 1849 struct ice_aqc_q_shutdown *cmd; 1850 struct ice_aq_desc desc; 1851 1852 cmd = &desc.params.q_shutdown; 1853 1854 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1855 1856 if (unloading) 1857 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1858 1859 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1860 } 1861 1862 /** 1863 * ice_aq_req_res 1864 * @hw: pointer to the HW struct 1865 * @res: resource ID 1866 * @access: access type 1867 * @sdp_number: resource number 1868 * @timeout: the maximum time in ms that the driver may hold the resource 1869 * @cd: pointer to command details structure or NULL 1870 * 1871 * Requests common resource using the admin queue commands (0x0008). 1872 * When attempting to acquire the Global Config Lock, the driver can 1873 * learn of three states: 1874 * 1) 0 - acquired lock, and can perform download package 1875 * 2) -EIO - did not get lock, driver should fail to load 1876 * 3) -EALREADY - did not get lock, but another driver has 1877 * successfully downloaded the package; the driver does 1878 * not have to download the package and can continue 1879 * loading 1880 * 1881 * Note that if the caller is in an acquire lock, perform action, release lock 1882 * phase of operation, it is possible that the FW may detect a timeout and issue 1883 * a CORER. In this case, the driver will receive a CORER interrupt and will 1884 * have to determine its cause. The calling thread that is handling this flow 1885 * will likely get an error propagated back to it indicating the Download 1886 * Package, Update Package or the Release Resource AQ commands timed out. 1887 */ 1888 static int 1889 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1890 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1891 struct ice_sq_cd *cd) 1892 { 1893 struct ice_aqc_req_res *cmd_resp; 1894 struct ice_aq_desc desc; 1895 int status; 1896 1897 cmd_resp = &desc.params.res_owner; 1898 1899 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1900 1901 cmd_resp->res_id = cpu_to_le16(res); 1902 cmd_resp->access_type = cpu_to_le16(access); 1903 cmd_resp->res_number = cpu_to_le32(sdp_number); 1904 cmd_resp->timeout = cpu_to_le32(*timeout); 1905 *timeout = 0; 1906 1907 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1908 1909 /* The completion specifies the maximum time in ms that the driver 1910 * may hold the resource in the Timeout field. 1911 */ 1912 1913 /* Global config lock response utilizes an additional status field. 1914 * 1915 * If the Global config lock resource is held by some other driver, the 1916 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1917 * and the timeout field indicates the maximum time the current owner 1918 * of the resource has to free it. 
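	 *
	 * In summary, the mapping implemented below is:
	 *   ICE_AQ_RES_GLBL_SUCCESS -> 0 (lock acquired, *timeout valid)
	 *   ICE_AQ_RES_GLBL_IN_PROG -> -EIO (held elsewhere, *timeout valid)
	 *   ICE_AQ_RES_GLBL_DONE    -> -EALREADY (package already downloaded)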
 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue command (0x0009).
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ?
timeout - delay : 0; 2010 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2011 2012 if (status == -EALREADY) 2013 /* lock free, but no work to do */ 2014 break; 2015 2016 if (!status) 2017 /* lock acquired */ 2018 break; 2019 } 2020 if (status && status != -EALREADY) 2021 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 2022 2023 ice_acquire_res_exit: 2024 if (status == -EALREADY) { 2025 if (access == ICE_RES_WRITE) 2026 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 2027 else 2028 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 2029 } 2030 return status; 2031 } 2032 2033 /** 2034 * ice_release_res 2035 * @hw: pointer to the HW structure 2036 * @res: resource ID 2037 * 2038 * This function will release a resource using the proper Admin Command. 2039 */ 2040 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2041 { 2042 unsigned long timeout; 2043 int status; 2044 2045 /* there are some rare cases when trying to release the resource 2046 * results in an admin queue timeout, so handle them correctly 2047 */ 2048 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 2049 do { 2050 status = ice_aq_release_res(hw, res, 0, NULL); 2051 if (status != -EIO) 2052 break; 2053 usleep_range(1000, 2000); 2054 } while (time_before(jiffies, timeout)); 2055 } 2056 2057 /** 2058 * ice_aq_alloc_free_res - command to allocate/free resources 2059 * @hw: pointer to the HW struct 2060 * @buf: Indirect buffer to hold data parameters and response 2061 * @buf_size: size of buffer for indirect commands 2062 * @opc: pass in the command opcode 2063 * 2064 * Helper function to allocate/free resources using the admin queue commands 2065 */ 2066 int ice_aq_alloc_free_res(struct ice_hw *hw, 2067 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2068 enum ice_adminq_opc opc) 2069 { 2070 struct ice_aqc_alloc_free_res_cmd *cmd; 2071 struct ice_aq_desc desc; 2072 2073 cmd = &desc.params.sw_res_ctrl; 2074 2075 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 2076 return -EINVAL; 2077 2078 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2079 2080 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2081 2082 cmd->num_entries = cpu_to_le16(1); 2083 2084 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 2085 } 2086 2087 /** 2088 * ice_alloc_hw_res - allocate resource 2089 * @hw: pointer to the HW struct 2090 * @type: type of resource 2091 * @num: number of resources to allocate 2092 * @btm: allocate from bottom 2093 * @res: pointer to array that will receive the resources 2094 */ 2095 int 2096 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2097 { 2098 struct ice_aqc_alloc_free_res_elem *buf; 2099 u16 buf_len; 2100 int status; 2101 2102 buf_len = struct_size(buf, elem, num); 2103 buf = kzalloc(buf_len, GFP_KERNEL); 2104 if (!buf) 2105 return -ENOMEM; 2106 2107 /* Prepare buffer to allocate resource. 
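	 * The buffer is an ice_aqc_alloc_free_res_elem header (num_elems,
	 * res_type) followed by num elem[] entries; on success firmware
	 * writes the allocated resource IDs into elem[], which are copied
	 * out to @res below.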
*/ 2108 buf->num_elems = cpu_to_le16(num); 2109 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2110 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2111 if (btm) 2112 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2113 2114 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2115 if (status) 2116 goto ice_alloc_res_exit; 2117 2118 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2119 2120 ice_alloc_res_exit: 2121 kfree(buf); 2122 return status; 2123 } 2124 2125 /** 2126 * ice_free_hw_res - free allocated HW resource 2127 * @hw: pointer to the HW struct 2128 * @type: type of resource to free 2129 * @num: number of resources 2130 * @res: pointer to array that contains the resources to free 2131 */ 2132 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2133 { 2134 struct ice_aqc_alloc_free_res_elem *buf; 2135 u16 buf_len; 2136 int status; 2137 2138 buf_len = struct_size(buf, elem, num); 2139 buf = kzalloc(buf_len, GFP_KERNEL); 2140 if (!buf) 2141 return -ENOMEM; 2142 2143 /* Prepare buffer to free resource. */ 2144 buf->num_elems = cpu_to_le16(num); 2145 buf->res_type = cpu_to_le16(type); 2146 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2147 2148 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2149 if (status) 2150 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2151 2152 kfree(buf); 2153 return status; 2154 } 2155 2156 /** 2157 * ice_get_num_per_func - determine number of resources per PF 2158 * @hw: pointer to the HW structure 2159 * @max: value to be evenly split between each PF 2160 * 2161 * Determine the number of valid functions by going through the bitmap returned 2162 * from parsing capabilities and use this to calculate the number of resources 2163 * per PF based on the max value passed in. 2164 */ 2165 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2166 { 2167 u8 funcs; 2168 2169 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2170 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2171 ICE_CAPS_VALID_FUNCS_M); 2172 2173 if (!funcs) 2174 return 0; 2175 2176 return max / funcs; 2177 } 2178 2179 /** 2180 * ice_parse_common_caps - parse common device/function capabilities 2181 * @hw: pointer to the HW struct 2182 * @caps: pointer to common capabilities structure 2183 * @elem: the capability element to parse 2184 * @prefix: message prefix for tracing capabilities 2185 * 2186 * Given a capability element, extract relevant details into the common 2187 * capability structure. 2188 * 2189 * Returns: true if the capability matches one of the common capability ids, 2190 * false otherwise. 
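 *
 * Each element carries a 16-bit capability ID plus number, logical_id
 * and phys_id payload words whose meaning depends on the ID; for
 * example, for ICE_AQC_CAPS_RSS the number word is the RSS table size
 * and logical_id is the table entry width.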
2191 */ 2192 static bool 2193 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2194 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2195 { 2196 u32 logical_id = le32_to_cpu(elem->logical_id); 2197 u32 phys_id = le32_to_cpu(elem->phys_id); 2198 u32 number = le32_to_cpu(elem->number); 2199 u16 cap = le16_to_cpu(elem->cap); 2200 bool found = true; 2201 2202 switch (cap) { 2203 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2204 caps->valid_functions = number; 2205 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2206 caps->valid_functions); 2207 break; 2208 case ICE_AQC_CAPS_SRIOV: 2209 caps->sr_iov_1_1 = (number == 1); 2210 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2211 caps->sr_iov_1_1); 2212 break; 2213 case ICE_AQC_CAPS_DCB: 2214 caps->dcb = (number == 1); 2215 caps->active_tc_bitmap = logical_id; 2216 caps->maxtc = phys_id; 2217 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2218 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2219 caps->active_tc_bitmap); 2220 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2221 break; 2222 case ICE_AQC_CAPS_RSS: 2223 caps->rss_table_size = number; 2224 caps->rss_table_entry_width = logical_id; 2225 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2226 caps->rss_table_size); 2227 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2228 caps->rss_table_entry_width); 2229 break; 2230 case ICE_AQC_CAPS_RXQS: 2231 caps->num_rxq = number; 2232 caps->rxq_first_id = phys_id; 2233 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2234 caps->num_rxq); 2235 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2236 caps->rxq_first_id); 2237 break; 2238 case ICE_AQC_CAPS_TXQS: 2239 caps->num_txq = number; 2240 caps->txq_first_id = phys_id; 2241 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2242 caps->num_txq); 2243 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2244 caps->txq_first_id); 2245 break; 2246 case ICE_AQC_CAPS_MSIX: 2247 caps->num_msix_vectors = number; 2248 caps->msix_vector_first_id = phys_id; 2249 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2250 caps->num_msix_vectors); 2251 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2252 caps->msix_vector_first_id); 2253 break; 2254 case ICE_AQC_CAPS_PENDING_NVM_VER: 2255 caps->nvm_update_pending_nvm = true; 2256 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2257 break; 2258 case ICE_AQC_CAPS_PENDING_OROM_VER: 2259 caps->nvm_update_pending_orom = true; 2260 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2261 break; 2262 case ICE_AQC_CAPS_PENDING_NET_VER: 2263 caps->nvm_update_pending_netlist = true; 2264 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2265 break; 2266 case ICE_AQC_CAPS_NVM_MGMT: 2267 caps->nvm_unified_update = 2268 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2269 true : false; 2270 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2271 caps->nvm_unified_update); 2272 break; 2273 case ICE_AQC_CAPS_RDMA: 2274 caps->rdma = (number == 1); 2275 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2276 break; 2277 case ICE_AQC_CAPS_MAX_MTU: 2278 caps->max_mtu = number; 2279 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2280 prefix, caps->max_mtu); 2281 break; 2282 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2283 caps->pcie_reset_avoidance = (number > 0); 2284 ice_debug(hw, ICE_DBG_INIT, 2285 "%s: pcie_reset_avoidance = %d\n", prefix, 2286 caps->pcie_reset_avoidance); 2287 break; 2288 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2289 caps->reset_restrict_support = (number == 1); 2290 ice_debug(hw, ICE_DBG_INIT, 2291 "%s: reset_restrict_support = %d\n", prefix, 2292 caps->reset_restrict_support); 2293 break; 2294 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2295 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2296 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2297 prefix, caps->roce_lag); 2298 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2299 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2300 prefix, caps->sriov_lag); 2301 break; 2302 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2303 caps->tx_sched_topo_comp_mode_en = (number == 1); 2304 break; 2305 default: 2306 /* Not one of the recognized common capabilities */ 2307 found = false; 2308 } 2309 2310 return found; 2311 } 2312 2313 /** 2314 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2315 * @hw: pointer to the HW structure 2316 * @caps: pointer to capabilities structure to fix 2317 * 2318 * Re-calculate the capabilities that are dependent on the number of physical 2319 * ports; i.e. some features are not supported or function differently on 2320 * devices with more than 4 ports. 2321 */ 2322 static void 2323 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2324 { 2325 /* This assumes device capabilities are always scanned before function 2326 * capabilities during the initialization flow. 2327 */ 2328 if (hw->dev_caps.num_funcs > 4) { 2329 /* Max 4 TCs per port */ 2330 caps->maxtc = 4; 2331 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2332 caps->maxtc); 2333 if (caps->rdma) { 2334 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2335 caps->rdma = 0; 2336 } 2337 2338 /* print message only when processing device capabilities 2339 * during initialization. 2340 */ 2341 if (caps == &hw->dev_caps.common_cap) 2342 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2343 } 2344 } 2345 2346 /** 2347 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2348 * @hw: pointer to the HW struct 2349 * @func_p: pointer to function capabilities structure 2350 * @cap: pointer to the capability element to parse 2351 * 2352 * Extract function capabilities for ICE_AQC_CAPS_VF. 
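 *
 * The element's number word carries the count of VFs allocated to this
 * function and its logical_id word carries the base VF ID.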
2353 */ 2354 static void 2355 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2356 struct ice_aqc_list_caps_elem *cap) 2357 { 2358 u32 logical_id = le32_to_cpu(cap->logical_id); 2359 u32 number = le32_to_cpu(cap->number); 2360 2361 func_p->num_allocd_vfs = number; 2362 func_p->vf_base_id = logical_id; 2363 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2364 func_p->num_allocd_vfs); 2365 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2366 func_p->vf_base_id); 2367 } 2368 2369 /** 2370 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2371 * @hw: pointer to the HW struct 2372 * @func_p: pointer to function capabilities structure 2373 * @cap: pointer to the capability element to parse 2374 * 2375 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2376 */ 2377 static void 2378 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2379 struct ice_aqc_list_caps_elem *cap) 2380 { 2381 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2382 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2383 le32_to_cpu(cap->number)); 2384 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2385 func_p->guar_num_vsi); 2386 } 2387 2388 /** 2389 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2390 * @hw: pointer to the HW struct 2391 * @func_p: pointer to function capabilities structure 2392 * @cap: pointer to the capability element to parse 2393 * 2394 * Extract function capabilities for ICE_AQC_CAPS_1588. 2395 */ 2396 static void 2397 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2398 struct ice_aqc_list_caps_elem *cap) 2399 { 2400 struct ice_ts_func_info *info = &func_p->ts_func_info; 2401 u32 number = le32_to_cpu(cap->number); 2402 2403 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2404 func_p->common_cap.ieee_1588 = info->ena; 2405 2406 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2407 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2408 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2409 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2410 2411 if (!ice_is_e825c(hw)) { 2412 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2413 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2414 } else { 2415 info->clk_freq = ICE_TIME_REF_FREQ_156_250; 2416 info->clk_src = ICE_CLK_SRC_TCXO; 2417 } 2418 2419 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2420 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2421 } else { 2422 /* Unknown clock frequency, so assume a (probably incorrect) 2423 * default to avoid out-of-bounds look ups of frequency 2424 * related information. 
 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, gsize, bsize;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
		bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
		break;
	case ICE_MAC_E810:
	default:
		gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
		bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
	}
	func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
	func_p->fd_fltr_best_effort = bsize;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
2492 */ 2493 static void 2494 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2495 void *buf, u32 cap_count) 2496 { 2497 struct ice_aqc_list_caps_elem *cap_resp; 2498 u32 i; 2499 2500 cap_resp = buf; 2501 2502 memset(func_p, 0, sizeof(*func_p)); 2503 2504 for (i = 0; i < cap_count; i++) { 2505 u16 cap = le16_to_cpu(cap_resp[i].cap); 2506 bool found; 2507 2508 found = ice_parse_common_caps(hw, &func_p->common_cap, 2509 &cap_resp[i], "func caps"); 2510 2511 switch (cap) { 2512 case ICE_AQC_CAPS_VF: 2513 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2514 break; 2515 case ICE_AQC_CAPS_VSI: 2516 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2517 break; 2518 case ICE_AQC_CAPS_1588: 2519 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2520 break; 2521 case ICE_AQC_CAPS_FD: 2522 ice_parse_fdir_func_caps(hw, func_p); 2523 break; 2524 default: 2525 /* Don't list common capabilities as unknown */ 2526 if (!found) 2527 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2528 i, cap); 2529 break; 2530 } 2531 } 2532 2533 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2534 } 2535 2536 /** 2537 * ice_func_id_to_logical_id - map from function id to logical pf id 2538 * @active_function_bitmap: active function bitmap 2539 * @pf_id: function number of device 2540 * 2541 * Return: logical PF ID. 2542 */ 2543 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) 2544 { 2545 u8 logical_id = 0; 2546 u8 i; 2547 2548 for (i = 0; i < pf_id; i++) 2549 if (active_function_bitmap & BIT(i)) 2550 logical_id++; 2551 2552 return logical_id; 2553 } 2554 2555 /** 2556 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2557 * @hw: pointer to the HW struct 2558 * @dev_p: pointer to device capabilities structure 2559 * @cap: capability element to parse 2560 * 2561 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2562 */ 2563 static void 2564 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2565 struct ice_aqc_list_caps_elem *cap) 2566 { 2567 u32 number = le32_to_cpu(cap->number); 2568 2569 dev_p->num_funcs = hweight32(number); 2570 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2571 dev_p->num_funcs); 2572 2573 hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); 2574 } 2575 2576 /** 2577 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2578 * @hw: pointer to the HW struct 2579 * @dev_p: pointer to device capabilities structure 2580 * @cap: capability element to parse 2581 * 2582 * Parse ICE_AQC_CAPS_VF for device capabilities. 2583 */ 2584 static void 2585 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2586 struct ice_aqc_list_caps_elem *cap) 2587 { 2588 u32 number = le32_to_cpu(cap->number); 2589 2590 dev_p->num_vfs_exposed = number; 2591 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2592 dev_p->num_vfs_exposed); 2593 } 2594 2595 /** 2596 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2597 * @hw: pointer to the HW struct 2598 * @dev_p: pointer to device capabilities structure 2599 * @cap: capability element to parse 2600 * 2601 * Parse ICE_AQC_CAPS_VSI for device capabilities. 
2602 */ 2603 static void 2604 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2605 struct ice_aqc_list_caps_elem *cap) 2606 { 2607 u32 number = le32_to_cpu(cap->number); 2608 2609 dev_p->num_vsi_allocd_to_host = number; 2610 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2611 dev_p->num_vsi_allocd_to_host); 2612 } 2613 2614 /** 2615 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2616 * @hw: pointer to the HW struct 2617 * @dev_p: pointer to device capabilities structure 2618 * @cap: capability element to parse 2619 * 2620 * Parse ICE_AQC_CAPS_1588 for device capabilities. 2621 */ 2622 static void 2623 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2624 struct ice_aqc_list_caps_elem *cap) 2625 { 2626 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2627 u32 logical_id = le32_to_cpu(cap->logical_id); 2628 u32 phys_id = le32_to_cpu(cap->phys_id); 2629 u32 number = le32_to_cpu(cap->number); 2630 2631 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2632 dev_p->common_cap.ieee_1588 = info->ena; 2633 2634 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2635 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2636 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2637 2638 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2639 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2640 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2641 2642 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2643 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2644 2645 info->ena_ports = logical_id; 2646 info->tmr_own_map = phys_id; 2647 2648 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2649 dev_p->common_cap.ieee_1588); 2650 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2651 info->tmr0_owner); 2652 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2653 info->tmr0_owned); 2654 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2655 info->tmr0_ena); 2656 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2657 info->tmr1_owner); 2658 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2659 info->tmr1_owned); 2660 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2661 info->tmr1_ena); 2662 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2663 info->ts_ll_read); 2664 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2665 info->ts_ll_int_read); 2666 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2667 info->ena_ports); 2668 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2669 info->tmr_own_map); 2670 } 2671 2672 /** 2673 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2674 * @hw: pointer to the HW struct 2675 * @dev_p: pointer to device capabilities structure 2676 * @cap: capability element to parse 2677 * 2678 * Parse ICE_AQC_CAPS_FD for device capabilities. 
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
 * enabled sensors.
 */
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			     struct ice_aqc_list_caps_elem *cap)
{
	dev_p->supported_sensors = le32_to_cpu(cap->number);

	ice_debug(hw, ICE_DBG_INIT,
		  "dev caps: supported sensors (bitmap) = 0x%x\n",
		  dev_p->supported_sensors);
}

/**
 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
 */
static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
					struct ice_hw_dev_caps *dev_p,
					struct ice_aqc_list_caps_elem *cap)
{
	dev_p->nac_topo.mode = le32_to_cpu(cap->number);
	dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;

	dev_info(ice_hw_to_dev(hw),
		 "PF is configured in %s mode with IP instance ID %d\n",
		 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
		 "primary" : "secondary", dev_p->nac_topo.id);

	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
		  dev_p->nac_topo.id);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2752 */ 2753 static void 2754 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2755 void *buf, u32 cap_count) 2756 { 2757 struct ice_aqc_list_caps_elem *cap_resp; 2758 u32 i; 2759 2760 cap_resp = buf; 2761 2762 memset(dev_p, 0, sizeof(*dev_p)); 2763 2764 for (i = 0; i < cap_count; i++) { 2765 u16 cap = le16_to_cpu(cap_resp[i].cap); 2766 bool found; 2767 2768 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2769 &cap_resp[i], "dev caps"); 2770 2771 switch (cap) { 2772 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2773 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2774 break; 2775 case ICE_AQC_CAPS_VF: 2776 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2777 break; 2778 case ICE_AQC_CAPS_VSI: 2779 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2780 break; 2781 case ICE_AQC_CAPS_1588: 2782 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2783 break; 2784 case ICE_AQC_CAPS_FD: 2785 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2786 break; 2787 case ICE_AQC_CAPS_SENSOR_READING: 2788 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2789 break; 2790 case ICE_AQC_CAPS_NAC_TOPOLOGY: 2791 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); 2792 break; 2793 default: 2794 /* Don't list common capabilities as unknown */ 2795 if (!found) 2796 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2797 i, cap); 2798 break; 2799 } 2800 } 2801 2802 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2803 } 2804 2805 /** 2806 * ice_is_phy_rclk_in_netlist 2807 * @hw: pointer to the hw struct 2808 * 2809 * Check if the PHY Recovered Clock device is present in the netlist 2810 */ 2811 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2812 { 2813 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, 2814 ICE_AQC_LINK_TOPO_NODE_CTX_PORT, 2815 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2816 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, 2817 ICE_AQC_LINK_TOPO_NODE_CTX_PORT, 2818 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2819 return false; 2820 2821 return true; 2822 } 2823 2824 /** 2825 * ice_is_clock_mux_in_netlist 2826 * @hw: pointer to the hw struct 2827 * 2828 * Check if the Clock Multiplexer device is present in the netlist 2829 */ 2830 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2831 { 2832 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2833 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2834 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2835 NULL)) 2836 return false; 2837 2838 return true; 2839 } 2840 2841 /** 2842 * ice_is_cgu_in_netlist - check for CGU presence 2843 * @hw: pointer to the hw struct 2844 * 2845 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2846 * Save the CGU part number in the hw structure for later use. 
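 *
 * Two known CGU parts are probed in order, the ZL30632_80032 and the
 * SI5383_5384; the first one found is cached in hw->cgu_part_number.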
2847 * Return: 2848 * * true - cgu is present 2849 * * false - cgu is not present 2850 */ 2851 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2852 { 2853 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2854 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2855 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2856 NULL)) { 2857 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2858 return true; 2859 } else if (!ice_find_netlist_node(hw, 2860 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2861 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2862 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2863 NULL)) { 2864 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2865 return true; 2866 } 2867 2868 return false; 2869 } 2870 2871 /** 2872 * ice_is_gps_in_netlist 2873 * @hw: pointer to the hw struct 2874 * 2875 * Check if the GPS generic device is present in the netlist 2876 */ 2877 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2878 { 2879 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2880 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2881 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2882 return false; 2883 2884 return true; 2885 } 2886 2887 /** 2888 * ice_aq_list_caps - query function/device capabilities 2889 * @hw: pointer to the HW struct 2890 * @buf: a buffer to hold the capabilities 2891 * @buf_size: size of the buffer 2892 * @cap_count: if not NULL, set to the number of capabilities reported 2893 * @opc: capabilities type to discover, device or function 2894 * @cd: pointer to command details structure or NULL 2895 * 2896 * Get the function (0x000A) or device (0x000B) capabilities description from 2897 * firmware and store it in the buffer. 2898 * 2899 * If the cap_count pointer is not NULL, then it is set to the number of 2900 * capabilities firmware will report. Note that if the buffer size is too 2901 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2902 * cap_count will still be updated in this case. It is recommended that the 2903 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2904 * firmware could return) to avoid this. 2905 */ 2906 int 2907 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2908 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2909 { 2910 struct ice_aqc_list_caps *cmd; 2911 struct ice_aq_desc desc; 2912 int status; 2913 2914 cmd = &desc.params.get_cap; 2915 2916 if (opc != ice_aqc_opc_list_func_caps && 2917 opc != ice_aqc_opc_list_dev_caps) 2918 return -EINVAL; 2919 2920 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2921 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2922 2923 if (cap_count) 2924 *cap_count = le32_to_cpu(cmd->count); 2925 2926 return status; 2927 } 2928 2929 /** 2930 * ice_discover_dev_caps - Read and extract device capabilities 2931 * @hw: pointer to the hardware structure 2932 * @dev_caps: pointer to device capabilities structure 2933 * 2934 * Read the device capabilities and extract them into the dev_caps structure 2935 * for later use. 2936 */ 2937 int 2938 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2939 { 2940 u32 cap_count = 0; 2941 void *cbuf; 2942 int status; 2943 2944 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2945 if (!cbuf) 2946 return -ENOMEM; 2947 2948 /* Although the driver doesn't know the number of capabilities the 2949 * device will return, we can simply send a 4KB buffer, the maximum 2950 * possible size that firmware can return. 
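	 * ICE_AQ_MAX_BUF_LEN is 4096 bytes, so the cap_count estimate below
	 * is the worst-case number of ice_aqc_list_caps_elem records that
	 * can fit in a single response.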
2951 */ 2952 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2953 2954 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2955 ice_aqc_opc_list_dev_caps, NULL); 2956 if (!status) 2957 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2958 kfree(cbuf); 2959 2960 return status; 2961 } 2962 2963 /** 2964 * ice_discover_func_caps - Read and extract function capabilities 2965 * @hw: pointer to the hardware structure 2966 * @func_caps: pointer to function capabilities structure 2967 * 2968 * Read the function capabilities and extract them into the func_caps structure 2969 * for later use. 2970 */ 2971 static int 2972 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2973 { 2974 u32 cap_count = 0; 2975 void *cbuf; 2976 int status; 2977 2978 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2979 if (!cbuf) 2980 return -ENOMEM; 2981 2982 /* Although the driver doesn't know the number of capabilities the 2983 * device will return, we can simply send a 4KB buffer, the maximum 2984 * possible size that firmware can return. 2985 */ 2986 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2987 2988 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2989 ice_aqc_opc_list_func_caps, NULL); 2990 if (!status) 2991 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2992 kfree(cbuf); 2993 2994 return status; 2995 } 2996 2997 /** 2998 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2999 * @hw: pointer to the hardware structure 3000 */ 3001 void ice_set_safe_mode_caps(struct ice_hw *hw) 3002 { 3003 struct ice_hw_func_caps *func_caps = &hw->func_caps; 3004 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 3005 struct ice_hw_common_caps cached_caps; 3006 u32 num_funcs; 3007 3008 /* cache some func_caps values that should be restored after memset */ 3009 cached_caps = func_caps->common_cap; 3010 3011 /* unset func capabilities */ 3012 memset(func_caps, 0, sizeof(*func_caps)); 3013 3014 #define ICE_RESTORE_FUNC_CAP(name) \ 3015 func_caps->common_cap.name = cached_caps.name 3016 3017 /* restore cached values */ 3018 ICE_RESTORE_FUNC_CAP(valid_functions); 3019 ICE_RESTORE_FUNC_CAP(txq_first_id); 3020 ICE_RESTORE_FUNC_CAP(rxq_first_id); 3021 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 3022 ICE_RESTORE_FUNC_CAP(max_mtu); 3023 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 3024 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 3025 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 3026 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 3027 3028 /* one Tx and one Rx queue in safe mode */ 3029 func_caps->common_cap.num_rxq = 1; 3030 func_caps->common_cap.num_txq = 1; 3031 3032 /* two MSIX vectors, one for traffic and one for misc causes */ 3033 func_caps->common_cap.num_msix_vectors = 2; 3034 func_caps->guar_num_vsi = 1; 3035 3036 /* cache some dev_caps values that should be restored after memset */ 3037 cached_caps = dev_caps->common_cap; 3038 num_funcs = dev_caps->num_funcs; 3039 3040 /* unset dev capabilities */ 3041 memset(dev_caps, 0, sizeof(*dev_caps)); 3042 3043 #define ICE_RESTORE_DEV_CAP(name) \ 3044 dev_caps->common_cap.name = cached_caps.name 3045 3046 /* restore cached values */ 3047 ICE_RESTORE_DEV_CAP(valid_functions); 3048 ICE_RESTORE_DEV_CAP(txq_first_id); 3049 ICE_RESTORE_DEV_CAP(rxq_first_id); 3050 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 3051 ICE_RESTORE_DEV_CAP(max_mtu); 3052 ICE_RESTORE_DEV_CAP(nvm_unified_update); 3053 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 3054 
ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 3055 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 3056 dev_caps->num_funcs = num_funcs; 3057 3058 /* one Tx and one Rx queue per function in safe mode */ 3059 dev_caps->common_cap.num_rxq = num_funcs; 3060 dev_caps->common_cap.num_txq = num_funcs; 3061 3062 /* two MSIX vectors per function */ 3063 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 3064 } 3065 3066 /** 3067 * ice_get_caps - get info about the HW 3068 * @hw: pointer to the hardware structure 3069 */ 3070 int ice_get_caps(struct ice_hw *hw) 3071 { 3072 int status; 3073 3074 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3075 if (status) 3076 return status; 3077 3078 return ice_discover_func_caps(hw, &hw->func_caps); 3079 } 3080 3081 /** 3082 * ice_aq_manage_mac_write - manage MAC address write command 3083 * @hw: pointer to the HW struct 3084 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3085 * @flags: flags to control write behavior 3086 * @cd: pointer to command details structure or NULL 3087 * 3088 * This function is used to write MAC address to the NVM (0x0108). 3089 */ 3090 int 3091 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3092 struct ice_sq_cd *cd) 3093 { 3094 struct ice_aqc_manage_mac_write *cmd; 3095 struct ice_aq_desc desc; 3096 3097 cmd = &desc.params.mac_write; 3098 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3099 3100 cmd->flags = flags; 3101 ether_addr_copy(cmd->mac_addr, mac_addr); 3102 3103 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3104 } 3105 3106 /** 3107 * ice_aq_clear_pxe_mode 3108 * @hw: pointer to the HW struct 3109 * 3110 * Tell the firmware that the driver is taking over from PXE (0x0110). 3111 */ 3112 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3113 { 3114 struct ice_aq_desc desc; 3115 3116 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3117 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3118 3119 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3120 } 3121 3122 /** 3123 * ice_clear_pxe_mode - clear pxe operations mode 3124 * @hw: pointer to the HW struct 3125 * 3126 * Make sure all PXE mode settings are cleared, including things 3127 * like descriptor fetch/write-back mode. 3128 */ 3129 void ice_clear_pxe_mode(struct ice_hw *hw) 3130 { 3131 if (ice_check_sq_alive(hw, &hw->adminq)) 3132 ice_aq_clear_pxe_mode(hw); 3133 } 3134 3135 /** 3136 * ice_aq_set_port_params - set physical port parameters. 
 * @pi: pointer to the port info struct
 * @double_vlan: if set double VLAN is enabled
 * @cd: pointer to command details structure or NULL
 *
 * Set Physical port parameters (0x0203)
 */
int
ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_params *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;
	u16 cmd_flags = 0;

	cmd = &desc.params.set_port_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
	if (double_vlan)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
	cmd->cmd_flags = cpu_to_le16(cmd_flags);

	cmd->local_fwd_mode = pi->local_fwd_mode |
			      ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_100m_speed_supported
 * @hw: pointer to the HW struct
 *
 * Returns true if 100M speeds are supported by the device,
 * false otherwise.
 */
bool ice_is_100m_speed_supported(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in the PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function will convert one PHY type to its
 * speed.
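 *
 * For example, a lone ICE_PHY_TYPE_LOW_25GBASE_SR bit maps to
 * ICE_AQ_LINK_SPEED_25GB, per the switch statement below.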
3196 * 3197 * Return: 3198 * * PHY speed for recognized PHY type 3199 * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3200 * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3201 */ 3202 u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3203 { 3204 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3205 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3206 3207 switch (phy_type_low) { 3208 case ICE_PHY_TYPE_LOW_100BASE_TX: 3209 case ICE_PHY_TYPE_LOW_100M_SGMII: 3210 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3211 break; 3212 case ICE_PHY_TYPE_LOW_1000BASE_T: 3213 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3214 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3215 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3216 case ICE_PHY_TYPE_LOW_1G_SGMII: 3217 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3218 break; 3219 case ICE_PHY_TYPE_LOW_2500BASE_T: 3220 case ICE_PHY_TYPE_LOW_2500BASE_X: 3221 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3222 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3223 break; 3224 case ICE_PHY_TYPE_LOW_5GBASE_T: 3225 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3226 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3227 break; 3228 case ICE_PHY_TYPE_LOW_10GBASE_T: 3229 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3230 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3231 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3232 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3233 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3234 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3235 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3236 break; 3237 case ICE_PHY_TYPE_LOW_25GBASE_T: 3238 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3239 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3240 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3241 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3242 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3243 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3244 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3245 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3246 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3247 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3248 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3249 break; 3250 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3251 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3252 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3253 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3254 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3255 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3256 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3257 break; 3258 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3259 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3260 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3261 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3262 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3263 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3264 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3265 case ICE_PHY_TYPE_LOW_50G_AUI2: 3266 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3267 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3268 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3269 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3270 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3271 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3272 case ICE_PHY_TYPE_LOW_50G_AUI1: 3273 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3274 break; 3275 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3276 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3277 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3278 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3279 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3280 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3281 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3282 case ICE_PHY_TYPE_LOW_100G_AUI4: 3283 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3284 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3285 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3286 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3287 case ICE_PHY_TYPE_LOW_100GBASE_DR: 
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_SR4:
	case ICE_PHY_TYPE_HIGH_200G_FR4:
	case ICE_PHY_TYPE_HIGH_200G_LR4:
	case ICE_PHY_TYPE_HIGH_200G_DR4:
	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_200G_AUI4:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap format, see
 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure will
 * represent a certain link speed. This helper function will turn on bits
 * in [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
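 *
 * A minimal usage sketch (error handling and the remaining cfg fields
 * elided; see ice_set_fc() for a complete caller):
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);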
3385 */ 3386 int 3387 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3388 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3389 { 3390 struct ice_aq_desc desc; 3391 int status; 3392 3393 if (!cfg) 3394 return -EINVAL; 3395 3396 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3397 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3398 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3399 cfg->caps); 3400 3401 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3402 } 3403 3404 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3405 desc.params.set_phy.lport_num = pi->lport; 3406 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3407 3408 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3409 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3410 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3411 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3412 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3413 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3414 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3415 cfg->low_power_ctrl_an); 3416 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3417 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3418 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3419 cfg->link_fec_opt); 3420 3421 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3422 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3423 status = 0; 3424 3425 if (!status) 3426 pi->phy.curr_user_phy_cfg = *cfg; 3427 3428 return status; 3429 } 3430 3431 /** 3432 * ice_update_link_info - update status of the HW network link 3433 * @pi: port info structure of the interested logical port 3434 */ 3435 int ice_update_link_info(struct ice_port_info *pi) 3436 { 3437 struct ice_link_status *li; 3438 int status; 3439 3440 if (!pi) 3441 return -EINVAL; 3442 3443 li = &pi->phy.link_info; 3444 3445 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3446 if (status) 3447 return status; 3448 3449 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3450 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3451 3452 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3453 if (!pcaps) 3454 return -ENOMEM; 3455 3456 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3457 pcaps, NULL); 3458 } 3459 3460 return status; 3461 } 3462 3463 /** 3464 * ice_aq_get_phy_equalization - function to read serdes equaliser 3465 * value from firmware using admin queue command. 3466 * @hw: pointer to the HW struct 3467 * @data_in: represents the serdes equalization parameter requested 3468 * @op_code: represents the serdes number and flag to represent tx or rx 3469 * @serdes_num: represents the serdes number 3470 * @output: pointer to the caller-supplied buffer to return serdes equaliser 3471 * 3472 * Return: non-zero status on error and 0 on success. 
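 *
 * Note that the op_code_serdes_sel word sent to firmware packs @op_code
 * together with the low four bits of @serdes_num, as built below.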
3473 */ 3474 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, 3475 u8 serdes_num, int *output) 3476 { 3477 struct ice_aqc_dnl_call_command *cmd; 3478 struct ice_aqc_dnl_call buf = {}; 3479 struct ice_aq_desc desc; 3480 int err; 3481 3482 buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); 3483 buf.sto.txrx_equa_reqs.op_code_serdes_sel = 3484 cpu_to_le16(op_code | (serdes_num & 0xF)); 3485 cmd = &desc.params.dnl_call; 3486 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); 3487 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | 3488 ICE_AQ_FLAG_RD | 3489 ICE_AQ_FLAG_SI); 3490 desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); 3491 cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); 3492 3493 err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), 3494 NULL); 3495 *output = err ? 0 : buf.sto.txrx_equa_resp.val; 3496 3497 return err; 3498 } 3499 3500 #define FEC_REG_PORT(port) { \ 3501 FEC_CORR_LOW_REG_PORT##port, \ 3502 FEC_CORR_HIGH_REG_PORT##port, \ 3503 FEC_UNCORR_LOW_REG_PORT##port, \ 3504 FEC_UNCORR_HIGH_REG_PORT##port, \ 3505 } 3506 3507 static const u32 fec_reg[][ICE_FEC_MAX] = { 3508 FEC_REG_PORT(0), 3509 FEC_REG_PORT(1), 3510 FEC_REG_PORT(2), 3511 FEC_REG_PORT(3) 3512 }; 3513 3514 /** 3515 * ice_aq_get_fec_stats - reads fec stats from phy 3516 * @hw: pointer to the HW struct 3517 * @pcs_quad: represents pcsquad of user input serdes 3518 * @pcs_port: represents the pcs port number part of above pcs quad 3519 * @fec_type: represents FEC stats type 3520 * @output: pointer to the caller-supplied buffer to return requested fec stats 3521 * 3522 * Return: non-zero status on error and 0 on success. 3523 */ 3524 int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, 3525 enum ice_fec_stats_types fec_type, u32 *output) 3526 { 3527 u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); 3528 struct ice_sbq_msg_input msg = {}; 3529 u32 receiver_id, reg_offset; 3530 int err; 3531 3532 if (pcs_port > 3) 3533 return -EINVAL; 3534 3535 reg_offset = fec_reg[pcs_port][fec_type]; 3536 3537 if (pcs_quad == 0) 3538 receiver_id = FEC_RECEIVER_ID_PCS0; 3539 else if (pcs_quad == 1) 3540 receiver_id = FEC_RECEIVER_ID_PCS1; 3541 else 3542 return -EINVAL; 3543 3544 msg.msg_addr_low = lower_16_bits(reg_offset); 3545 msg.msg_addr_high = receiver_id; 3546 msg.opcode = ice_sbq_msg_rd; 3547 msg.dest_dev = rmn_0; 3548 3549 err = ice_sbq_rw_reg(hw, &msg, flag); 3550 if (err) 3551 return err; 3552 3553 *output = msg.data; 3554 return 0; 3555 } 3556 3557 /** 3558 * ice_cache_phy_user_req 3559 * @pi: port information structure 3560 * @cache_data: PHY logging data 3561 * @cache_mode: PHY logging mode 3562 * 3563 * Log the user request on (FC, FEC, SPEED) for later use. 
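 *
 * For example, ice_cfg_phy_fc() below caches the requested flow control
 * mode with ICE_FC_MODE so a later PHY reconfiguration can replay the
 * user's choice.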
3564 */ 3565 static void 3566 ice_cache_phy_user_req(struct ice_port_info *pi, 3567 struct ice_phy_cache_mode_data cache_data, 3568 enum ice_phy_cache_mode cache_mode) 3569 { 3570 if (!pi) 3571 return; 3572 3573 switch (cache_mode) { 3574 case ICE_FC_MODE: 3575 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3576 break; 3577 case ICE_SPEED_MODE: 3578 pi->phy.curr_user_speed_req = 3579 cache_data.data.curr_user_speed_req; 3580 break; 3581 case ICE_FEC_MODE: 3582 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3583 break; 3584 default: 3585 break; 3586 } 3587 } 3588 3589 /** 3590 * ice_caps_to_fc_mode 3591 * @caps: PHY capabilities 3592 * 3593 * Convert PHY FC capabilities to ice FC mode 3594 */ 3595 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3596 { 3597 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3598 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3599 return ICE_FC_FULL; 3600 3601 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3602 return ICE_FC_TX_PAUSE; 3603 3604 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3605 return ICE_FC_RX_PAUSE; 3606 3607 return ICE_FC_NONE; 3608 } 3609 3610 /** 3611 * ice_caps_to_fec_mode 3612 * @caps: PHY capabilities 3613 * @fec_options: Link FEC options 3614 * 3615 * Convert PHY FEC capabilities to ice FEC mode 3616 */ 3617 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3618 { 3619 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3620 return ICE_FEC_AUTO; 3621 3622 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3623 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3624 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3625 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3626 return ICE_FEC_BASER; 3627 3628 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3629 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3630 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3631 return ICE_FEC_RS; 3632 3633 return ICE_FEC_NONE; 3634 } 3635 3636 /** 3637 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3638 * @pi: port information structure 3639 * @cfg: PHY configuration data to set FC mode 3640 * @req_mode: FC mode to configure 3641 */ 3642 int 3643 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3644 enum ice_fc_mode req_mode) 3645 { 3646 struct ice_phy_cache_mode_data cache_data; 3647 u8 pause_mask = 0x0; 3648 3649 if (!pi || !cfg) 3650 return -EINVAL; 3651 3652 switch (req_mode) { 3653 case ICE_FC_FULL: 3654 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3655 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3656 break; 3657 case ICE_FC_RX_PAUSE: 3658 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3659 break; 3660 case ICE_FC_TX_PAUSE: 3661 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3662 break; 3663 default: 3664 break; 3665 } 3666 3667 /* clear the old pause settings */ 3668 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3669 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3670 3671 /* set the new capabilities */ 3672 cfg->caps |= pause_mask; 3673 3674 /* Cache user FC request */ 3675 cache_data.data.curr_user_fc_req = req_mode; 3676 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3677 3678 return 0; 3679 } 3680 3681 /** 3682 * ice_set_fc 3683 * @pi: port information structure 3684 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3685 * @ena_auto_link_update: enable automatic link update 3686 * 3687 * Set the requested flow control mode. 
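 *
 * A minimal caller sketch (the requested mode is an arbitrary example;
 * real callers set pi->fc.req_mode from user/ethtool input):
 *
 *	u8 aq_failures = 0;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_RX_PAUSE;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set FC failed, stage 0x%x\n",
 *			  aq_failures);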
3688 */ 3689 int 3690 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3691 { 3692 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3693 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3694 struct ice_hw *hw; 3695 int status; 3696 3697 if (!pi || !aq_failures) 3698 return -EINVAL; 3699 3700 *aq_failures = 0; 3701 hw = pi->hw; 3702 3703 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3704 if (!pcaps) 3705 return -ENOMEM; 3706 3707 /* Get the current PHY config */ 3708 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3709 pcaps, NULL); 3710 if (status) { 3711 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3712 goto out; 3713 } 3714 3715 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3716 3717 /* Configure the set PHY data */ 3718 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3719 if (status) 3720 goto out; 3721 3722 /* If the capabilities have changed, then set the new config */ 3723 if (cfg.caps != pcaps->caps) { 3724 int retry_count, retry_max = 10; 3725 3726 /* Auto restart link so settings take effect */ 3727 if (ena_auto_link_update) 3728 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3729 3730 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3731 if (status) { 3732 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3733 goto out; 3734 } 3735 3736 /* Update the link info 3737 * It sometimes takes a really long time for link to 3738 * come back from the atomic reset. Thus, we wait a 3739 * little bit. 3740 */ 3741 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3742 status = ice_update_link_info(pi); 3743 3744 if (!status) 3745 break; 3746 3747 mdelay(100); 3748 } 3749 3750 if (status) 3751 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3752 } 3753 3754 out: 3755 return status; 3756 } 3757 3758 /** 3759 * ice_phy_caps_equals_cfg 3760 * @phy_caps: PHY capabilities 3761 * @phy_cfg: PHY configuration 3762 * 3763 * Helper function to determine if PHY capabilities matches PHY 3764 * configuration 3765 */ 3766 bool 3767 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3768 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3769 { 3770 u8 caps_mask, cfg_mask; 3771 3772 if (!phy_caps || !phy_cfg) 3773 return false; 3774 3775 /* These bits are not common between capabilities and configuration. 3776 * Do not use them to determine equality. 
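	 * The AN mode and module-qualification bits exist only in the
	 * get-capabilities word, and the auto-link-update bit exists only in
	 * the set-config word, so each side is masked with its own valid set
	 * before the fields are compared.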
3777 */ 3778 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3779 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3780 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3781 3782 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3783 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3784 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3785 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3786 phy_caps->eee_cap != phy_cfg->eee_cap || 3787 phy_caps->eeer_value != phy_cfg->eeer_value || 3788 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3789 return false; 3790 3791 return true; 3792 } 3793 3794 /** 3795 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3796 * @pi: port information structure 3797 * @caps: PHY ability structure to copy data from 3798 * @cfg: PHY configuration structure to copy data to 3799 * 3800 * Helper function to copy AQC PHY get ability data to PHY set configuration 3801 * data structure 3802 */ 3803 void 3804 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3805 struct ice_aqc_get_phy_caps_data *caps, 3806 struct ice_aqc_set_phy_cfg_data *cfg) 3807 { 3808 if (!pi || !caps || !cfg) 3809 return; 3810 3811 memset(cfg, 0, sizeof(*cfg)); 3812 cfg->phy_type_low = caps->phy_type_low; 3813 cfg->phy_type_high = caps->phy_type_high; 3814 cfg->caps = caps->caps; 3815 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3816 cfg->eee_cap = caps->eee_cap; 3817 cfg->eeer_value = caps->eeer_value; 3818 cfg->link_fec_opt = caps->link_fec_options; 3819 cfg->module_compliance_enforcement = 3820 caps->module_compliance_enforcement; 3821 } 3822 3823 /** 3824 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3825 * @pi: port information structure 3826 * @cfg: PHY configuration data to set FEC mode 3827 * @fec: FEC mode to configure 3828 */ 3829 int 3830 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3831 enum ice_fec_mode fec) 3832 { 3833 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3834 struct ice_hw *hw; 3835 int status; 3836 3837 if (!pi || !cfg) 3838 return -EINVAL; 3839 3840 hw = pi->hw; 3841 3842 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3843 if (!pcaps) 3844 return -ENOMEM; 3845 3846 status = ice_aq_get_phy_caps(pi, false, 3847 (ice_fw_supports_report_dflt_cfg(hw) ? 3848 ICE_AQC_REPORT_DFLT_CFG : 3849 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3850 if (status) 3851 goto out; 3852 3853 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3854 cfg->link_fec_opt = pcaps->link_fec_options; 3855 3856 switch (fec) { 3857 case ICE_FEC_BASER: 3858 /* Clear RS bits, and AND BASE-R ability 3859 * bits and OR request bits. 3860 */ 3861 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3862 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3863 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3864 ICE_AQC_PHY_FEC_25G_KR_REQ; 3865 break; 3866 case ICE_FEC_RS: 3867 /* Clear BASE-R bits, and AND RS ability 3868 * bits and OR request bits. 3869 */ 3870 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3871 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3872 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3873 break; 3874 case ICE_FEC_NONE: 3875 /* Clear all FEC option bits. */ 3876 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3877 break; 3878 case ICE_FEC_AUTO: 3879 /* AND auto FEC bit, and all caps bits.
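		 * For ICE_FEC_AUTO, every FEC option the PHY reported as an
		 * ability is advertised again, leaving firmware free to pick
		 * the mode during (re)negotiation.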
*/ 3880 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3881 cfg->link_fec_opt |= pcaps->link_fec_options; 3882 break; 3883 default: 3884 status = -EINVAL; 3885 break; 3886 } 3887 3888 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3889 !ice_fw_supports_report_dflt_cfg(hw)) { 3890 struct ice_link_default_override_tlv tlv = { 0 }; 3891 3892 status = ice_get_link_default_override(&tlv, pi); 3893 if (status) 3894 goto out; 3895 3896 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3897 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3898 cfg->link_fec_opt = tlv.fec_options; 3899 } 3900 3901 out: 3902 return status; 3903 } 3904 3905 /** 3906 * ice_get_link_status - get status of the HW network link 3907 * @pi: port information structure 3908 * @link_up: pointer to bool (true/false = linkup/linkdown) 3909 * 3910 * Variable link_up is true if link is up, false if link is down. 3911 * The variable link_up is invalid if status is non-zero. As a 3912 * result of this call, link status reporting becomes enabled. 3913 */ 3914 int ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3915 { 3916 struct ice_phy_info *phy_info; 3917 int status = 0; 3918 3919 if (!pi || !link_up) 3920 return -EINVAL; 3921 3922 phy_info = &pi->phy; 3923 3924 if (phy_info->get_link_info) { 3925 status = ice_update_link_info(pi); 3926 3927 if (status) 3928 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3929 status); 3930 } 3931 3932 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3933 3934 return status; 3935 } 3936 3937 /** 3938 * ice_aq_set_link_restart_an 3939 * @pi: pointer to the port information structure 3940 * @ena_link: if true: enable link, if false: disable link 3941 * @cd: pointer to command details structure or NULL 3942 * 3943 * Sets up the link and restarts the Auto-Negotiation over the link.
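 *
 * Minimal usage sketch, restarting AN with the link kept enabled (error
 * handling omitted):
 *
 *	int err = ice_aq_set_link_restart_an(pi, true, NULL);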
3944 */ 3945 int 3946 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3947 struct ice_sq_cd *cd) 3948 { 3949 struct ice_aqc_restart_an *cmd; 3950 struct ice_aq_desc desc; 3951 3952 cmd = &desc.params.restart_an; 3953 3954 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3955 3956 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3957 cmd->lport_num = pi->lport; 3958 if (ena_link) 3959 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3960 else 3961 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3962 3963 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3964 } 3965 3966 /** 3967 * ice_aq_set_event_mask 3968 * @hw: pointer to the HW struct 3969 * @port_num: port number of the physical function 3970 * @mask: event mask to be set 3971 * @cd: pointer to command details structure or NULL 3972 * 3973 * Set event mask (0x0613) 3974 */ 3975 int 3976 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3977 struct ice_sq_cd *cd) 3978 { 3979 struct ice_aqc_set_event_mask *cmd; 3980 struct ice_aq_desc desc; 3981 3982 cmd = &desc.params.set_event_mask; 3983 3984 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3985 3986 cmd->lport_num = port_num; 3987 3988 cmd->event_mask = cpu_to_le16(mask); 3989 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3990 } 3991 3992 /** 3993 * ice_aq_set_mac_loopback 3994 * @hw: pointer to the HW struct 3995 * @ena_lpbk: Enable or Disable loopback 3996 * @cd: pointer to command details structure or NULL 3997 * 3998 * Enable/disable loopback on a given port 3999 */ 4000 int 4001 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 4002 { 4003 struct ice_aqc_set_mac_lb *cmd; 4004 struct ice_aq_desc desc; 4005 4006 cmd = &desc.params.set_mac_lb; 4007 4008 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 4009 if (ena_lpbk) 4010 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 4011 4012 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 4013 } 4014 4015 /** 4016 * ice_aq_set_port_id_led 4017 * @pi: pointer to the port information 4018 * @is_orig_mode: is this LED set to original mode (by the net-list) 4019 * @cd: pointer to command details structure or NULL 4020 * 4021 * Set LED value for the given port (0x06e9) 4022 */ 4023 int 4024 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 4025 struct ice_sq_cd *cd) 4026 { 4027 struct ice_aqc_set_port_id_led *cmd; 4028 struct ice_hw *hw = pi->hw; 4029 struct ice_aq_desc desc; 4030 4031 cmd = &desc.params.set_port_id_led; 4032 4033 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 4034 4035 if (is_orig_mode) 4036 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 4037 else 4038 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 4039 4040 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 4041 } 4042 4043 /** 4044 * ice_aq_get_port_options 4045 * @hw: pointer to the HW struct 4046 * @options: buffer for the resultant port options 4047 * @option_count: input - size of the buffer in port options structures, 4048 * output - number of returned port options 4049 * @lport: logical port to call the command with (optional) 4050 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 4051 * when PF owns more than 1 port it must be true 4052 * @active_option_idx: index of active port option in returned buffer 4053 * @active_option_valid: active option in returned buffer is valid 4054 * @pending_option_idx: index of pending port option in returned buffer 4055 * @pending_option_valid: pending option in returned buffer 
is valid 4056 * 4057 * Calls Get Port Options AQC (0x06ea) and verifies result. 4058 */ 4059 int 4060 ice_aq_get_port_options(struct ice_hw *hw, 4061 struct ice_aqc_get_port_options_elem *options, 4062 u8 *option_count, u8 lport, bool lport_valid, 4063 u8 *active_option_idx, bool *active_option_valid, 4064 u8 *pending_option_idx, bool *pending_option_valid) 4065 { 4066 struct ice_aqc_get_port_options *cmd; 4067 struct ice_aq_desc desc; 4068 int status; 4069 u8 i; 4070 4071 /* options buffer shall be able to hold max returned options */ 4072 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 4073 return -EINVAL; 4074 4075 cmd = &desc.params.get_port_options; 4076 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 4077 4078 if (lport_valid) 4079 cmd->lport_num = lport; 4080 cmd->lport_num_valid = lport_valid; 4081 4082 status = ice_aq_send_cmd(hw, &desc, options, 4083 *option_count * sizeof(*options), NULL); 4084 if (status) 4085 return status; 4086 4087 /* verify direct FW response & set output parameters */ 4088 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 4089 cmd->port_options_count); 4090 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 4091 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 4092 cmd->port_options); 4093 if (*active_option_valid) { 4094 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 4095 cmd->port_options); 4096 if (*active_option_idx > (*option_count - 1)) 4097 return -EIO; 4098 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 4099 *active_option_idx); 4100 } 4101 4102 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 4103 cmd->pending_port_option_status); 4104 if (*pending_option_valid) { 4105 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 4106 cmd->pending_port_option_status); 4107 if (*pending_option_idx > (*option_count - 1)) 4108 return -EIO; 4109 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 4110 *pending_option_idx); 4111 } 4112 4113 /* mask output options fields */ 4114 for (i = 0; i < *option_count; i++) { 4115 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 4116 options[i].pmd); 4117 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 4118 options[i].max_lane_speed); 4119 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 4120 options[i].pmd, options[i].max_lane_speed); 4121 } 4122 4123 return 0; 4124 } 4125 4126 /** 4127 * ice_aq_set_port_option 4128 * @hw: pointer to the HW struct 4129 * @lport: logical port to call the command with 4130 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 4131 * when PF owns more than 1 port it must be true 4132 * @new_option: new port option to be written 4133 * 4134 * Calls Set Port Options AQC (0x06eb). 
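 *
 * Typically paired with ice_aq_get_port_options(); a sketch where new_idx
 * is a hypothetical index taken from the returned options:
 *
 *	err = ice_aq_set_port_option(hw, 0, true, new_idx);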
4135 */ 4136 int 4137 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 4138 u8 new_option) 4139 { 4140 struct ice_aqc_set_port_option *cmd; 4141 struct ice_aq_desc desc; 4142 4143 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 4144 return -EINVAL; 4145 4146 cmd = &desc.params.set_port_option; 4147 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 4148 4149 if (lport_valid) 4150 cmd->lport_num = lport; 4151 4152 cmd->lport_num_valid = lport_valid; 4153 cmd->selected_port_option = new_option; 4154 4155 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 4156 } 4157 4158 /** 4159 * ice_aq_sff_eeprom 4160 * @hw: pointer to the HW struct 4161 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 4162 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 4163 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 4164 * @page: QSFP page 4165 * @set_page: set or ignore the page 4166 * @data: pointer to data buffer to be read/written to the I2C device. 4167 * @length: 1-16 for read, 1 for write. 4168 * @write: 0 read, 1 for write. 4169 * @cd: pointer to command details structure or NULL 4170 * 4171 * Read/Write SFF EEPROM (0x06EE) 4172 */ 4173 int 4174 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 4175 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 4176 bool write, struct ice_sq_cd *cd) 4177 { 4178 struct ice_aqc_sff_eeprom *cmd; 4179 struct ice_aq_desc desc; 4180 u16 i2c_bus_addr; 4181 int status; 4182 4183 if (!data || (mem_addr & 0xff00)) 4184 return -EINVAL; 4185 4186 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 4187 cmd = &desc.params.read_write_sff_param; 4188 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 4189 cmd->lport_num = (u8)(lport & 0xff); 4190 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 4191 i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) | 4192 FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page); 4193 if (write) 4194 i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE; 4195 cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr); 4196 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 4197 cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M); 4198 4199 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 4200 return status; 4201 } 4202 4203 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) 4204 { 4205 switch (type) { 4206 case ICE_LUT_VSI: 4207 return ICE_LUT_VSI_SIZE; 4208 case ICE_LUT_GLOBAL: 4209 return ICE_LUT_GLOBAL_SIZE; 4210 case ICE_LUT_PF: 4211 return ICE_LUT_PF_SIZE; 4212 } 4213 WARN_ONCE(1, "incorrect type passed"); 4214 return ICE_LUT_VSI_SIZE; 4215 } 4216 4217 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) 4218 { 4219 switch (size) { 4220 case ICE_LUT_VSI_SIZE: 4221 return ICE_AQC_LUT_SIZE_SMALL; 4222 case ICE_LUT_GLOBAL_SIZE: 4223 return ICE_AQC_LUT_SIZE_512; 4224 case ICE_LUT_PF_SIZE: 4225 return ICE_AQC_LUT_SIZE_2K; 4226 } 4227 WARN_ONCE(1, "incorrect size passed"); 4228 return 0; 4229 } 4230 4231 /** 4232 * __ice_aq_get_set_rss_lut 4233 * @hw: pointer to the hardware structure 4234 * @params: RSS LUT parameters 4235 * @set: set true to set the table, false to get the table 4236 * 4237 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4238 */ 4239 static int 4240 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 4241 struct ice_aq_get_set_rss_lut_params *params, bool set) 4242 { 4243 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 4244 enum 
ice_lut_type lut_type = params->lut_type; 4245 struct ice_aqc_get_set_rss_lut *desc_params; 4246 enum ice_aqc_lut_flags flags; 4247 enum ice_lut_size lut_size; 4248 struct ice_aq_desc desc; 4249 u8 *lut = params->lut; 4250 4251 4252 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4253 return -EINVAL; 4254 4255 lut_size = ice_lut_type_to_size(lut_type); 4256 if (lut_size > params->lut_size) 4257 return -EINVAL; 4258 else if (set && lut_size != params->lut_size) 4259 return -EINVAL; 4260 4261 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4262 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4263 if (set) 4264 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4265 4266 desc_params = &desc.params.get_set_rss_lut; 4267 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4268 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4269 4270 if (lut_type == ICE_LUT_GLOBAL) 4271 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4272 params->global_lut_id); 4273 4274 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4275 desc_params->flags = cpu_to_le16(flags); 4276 4277 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4278 } 4279 4280 /** 4281 * ice_aq_get_rss_lut 4282 * @hw: pointer to the hardware structure 4283 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4284 * 4285 * get the RSS lookup table, PF or VSI type 4286 */ 4287 int 4288 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4289 { 4290 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4291 } 4292 4293 /** 4294 * ice_aq_set_rss_lut 4295 * @hw: pointer to the hardware structure 4296 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4297 * 4298 * set the RSS lookup table, PF or VSI type 4299 */ 4300 int 4301 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4302 { 4303 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4304 } 4305 4306 /** 4307 * __ice_aq_get_set_rss_key 4308 * @hw: pointer to the HW struct 4309 * @vsi_id: VSI FW index 4310 * @key: pointer to key info struct 4311 * @set: set true to set the key, false to get the key 4312 * 4313 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4314 */ 4315 static int 4316 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4317 struct ice_aqc_get_set_rss_keys *key, bool set) 4318 { 4319 struct ice_aqc_get_set_rss_key *desc_params; 4320 u16 key_size = sizeof(*key); 4321 struct ice_aq_desc desc; 4322 4323 if (set) { 4324 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4325 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4326 } else { 4327 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4328 } 4329 4330 desc_params = &desc.params.get_set_rss_key; 4331 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4332 4333 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4334 } 4335 4336 /** 4337 * ice_aq_get_rss_key 4338 * @hw: pointer to the HW struct 4339 * @vsi_handle: software VSI handle 4340 * @key: pointer to key info struct 4341 * 4342 * get the RSS key per VSI 4343 */ 4344 int 4345 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4346 struct ice_aqc_get_set_rss_keys *key) 4347 { 4348 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4349 return -EINVAL; 4350 4351 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4352 key, false); 4353 } 4354 4355 /** 4356 * ice_aq_set_rss_key 4357 * @hw: pointer to the HW struct 4358 * @vsi_handle: software VSI handle 
4359 * @keys: pointer to key info struct 4360 * 4361 * set the RSS key per VSI 4362 */ 4363 int 4364 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4365 struct ice_aqc_get_set_rss_keys *keys) 4366 { 4367 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4368 return -EINVAL; 4369 4370 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4371 keys, true); 4372 } 4373 4374 /** 4375 * ice_aq_add_lan_txq 4376 * @hw: pointer to the hardware structure 4377 * @num_qgrps: Number of added queue groups 4378 * @qg_list: list of queue groups to be added 4379 * @buf_size: size of buffer for indirect command 4380 * @cd: pointer to command details structure or NULL 4381 * 4382 * Add Tx LAN queue (0x0C30) 4383 * 4384 * NOTE: 4385 * Prior to calling add Tx LAN queue: 4386 * Initialize the following as part of the Tx queue context: 4387 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4388 * Cache profile and Packet shaper profile. 4389 * 4390 * After add Tx LAN queue AQ command is completed: 4391 * Interrupts should be associated with specific queues, 4392 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4393 * flow. 4394 */ 4395 static int 4396 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4397 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4398 struct ice_sq_cd *cd) 4399 { 4400 struct ice_aqc_add_tx_qgrp *list; 4401 struct ice_aqc_add_txqs *cmd; 4402 struct ice_aq_desc desc; 4403 u16 i, sum_size = 0; 4404 4405 cmd = &desc.params.add_txqs; 4406 4407 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4408 4409 if (!qg_list) 4410 return -EINVAL; 4411 4412 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4413 return -EINVAL; 4414 4415 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4416 sum_size += struct_size(list, txqs, list->num_txqs); 4417 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4418 list->num_txqs); 4419 } 4420 4421 if (buf_size != sum_size) 4422 return -EINVAL; 4423 4424 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4425 4426 cmd->num_qgrps = num_qgrps; 4427 4428 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4429 } 4430 4431 /** 4432 * ice_aq_dis_lan_txq 4433 * @hw: pointer to the hardware structure 4434 * @num_qgrps: number of groups in the list 4435 * @qg_list: the list of groups to disable 4436 * @buf_size: the total size of the qg_list buffer in bytes 4437 * @rst_src: if called due to reset, specifies the reset source 4438 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4439 * @cd: pointer to command details structure or NULL 4440 * 4441 * Disable LAN Tx queue (0x0C31) 4442 */ 4443 static int 4444 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4445 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4446 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4447 struct ice_sq_cd *cd) 4448 { 4449 struct ice_aqc_dis_txq_item *item; 4450 struct ice_aqc_dis_txqs *cmd; 4451 struct ice_aq_desc desc; 4452 u16 vmvf_and_timeout; 4453 u16 i, sz = 0; 4454 int status; 4455 4456 cmd = &desc.params.dis_txqs; 4457 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4458 4459 /* qg_list can be NULL only in VM/VF reset flow */ 4460 if (!qg_list && !rst_src) 4461 return -EINVAL; 4462 4463 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4464 return -EINVAL; 4465 4466 cmd->num_entries = num_qgrps; 4467 4468 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); 4469 4470 switch (rst_src) { 4471 case ICE_VM_RESET: 4472 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4473 vmvf_and_timeout |= 
vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; 4474 break; 4475 case ICE_VF_RESET: 4476 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4477 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4478 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & 4479 ICE_AQC_Q_DIS_VMVF_NUM_M; 4480 break; 4481 case ICE_NO_RESET: 4482 default: 4483 break; 4484 } 4485 4486 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); 4487 4488 /* flush pipe on time out */ 4489 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4490 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4491 if (!qg_list) 4492 goto do_aq; 4493 4494 /* set RD bit to indicate that command buffer is provided by the driver 4495 * and it needs to be read by the firmware 4496 */ 4497 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4498 4499 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4500 u16 item_size = struct_size(item, q_id, item->num_qs); 4501 4502 /* If the num of queues is even, add 2 bytes of padding */ 4503 if ((item->num_qs % 2) == 0) 4504 item_size += 2; 4505 4506 sz += item_size; 4507 4508 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4509 } 4510 4511 if (buf_size != sz) 4512 return -EINVAL; 4513 4514 do_aq: 4515 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4516 if (status) { 4517 if (!qg_list) 4518 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4519 vmvf_num, hw->adminq.sq_last_status); 4520 else 4521 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4522 le16_to_cpu(qg_list[0].q_id[0]), 4523 hw->adminq.sq_last_status); 4524 } 4525 return status; 4526 } 4527 4528 /** 4529 * ice_aq_cfg_lan_txq 4530 * @hw: pointer to the hardware structure 4531 * @buf: buffer for command 4532 * @buf_size: size of buffer in bytes 4533 * @num_qs: number of queues being configured 4534 * @oldport: origination lport 4535 * @newport: destination lport 4536 * @cd: pointer to command details structure or NULL 4537 * 4538 * Move/Configure LAN Tx queue (0x0C32) 4539 * 4540 * There is a better AQ command to use for moving nodes, so only coding 4541 * this one for configuring the node. 
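 *
 * Illustrative call (the buffer is caller-built; the names used here are
 * hypothetical):
 *
 *	err = ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, num_qs,
 *				 old_lport, new_lport, NULL);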
4542 */ 4543 int 4544 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4545 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4546 struct ice_sq_cd *cd) 4547 { 4548 struct ice_aqc_cfg_txqs *cmd; 4549 struct ice_aq_desc desc; 4550 int status; 4551 4552 cmd = &desc.params.cfg_txqs; 4553 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4554 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4555 4556 if (!buf) 4557 return -EINVAL; 4558 4559 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4560 cmd->num_qs = num_qs; 4561 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4562 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); 4563 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); 4564 cmd->blocked_cgds = 0; 4565 4566 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4567 if (status) 4568 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4569 hw->adminq.sq_last_status); 4570 return status; 4571 } 4572 4573 /** 4574 * ice_aq_add_rdma_qsets 4575 * @hw: pointer to the hardware structure 4576 * @num_qset_grps: Number of RDMA Qset groups 4577 * @qset_list: list of Qset groups to be added 4578 * @buf_size: size of buffer for indirect command 4579 * @cd: pointer to command details structure or NULL 4580 * 4581 * Add Tx RDMA Qsets (0x0C33) 4582 */ 4583 static int 4584 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4585 struct ice_aqc_add_rdma_qset_data *qset_list, 4586 u16 buf_size, struct ice_sq_cd *cd) 4587 { 4588 struct ice_aqc_add_rdma_qset_data *list; 4589 struct ice_aqc_add_rdma_qset *cmd; 4590 struct ice_aq_desc desc; 4591 u16 i, sum_size = 0; 4592 4593 cmd = &desc.params.add_rdma_qset; 4594 4595 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4596 4597 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4598 return -EINVAL; 4599 4600 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4601 u16 num_qsets = le16_to_cpu(list->num_qsets); 4602 4603 sum_size += struct_size(list, rdma_qsets, num_qsets); 4604 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4605 num_qsets); 4606 } 4607 4608 if (buf_size != sum_size) 4609 return -EINVAL; 4610 4611 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4612 4613 cmd->num_qset_grps = num_qset_grps; 4614 4615 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4616 } 4617 4618 /* End of FW Admin Queue command wrappers */ 4619 4620 /** 4621 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4622 * @hw: pointer to the HW struct 4623 * @vsi_handle: software VSI handle 4624 * @tc: TC number 4625 * @q_handle: software queue handle 4626 */ 4627 struct ice_q_ctx * 4628 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4629 { 4630 struct ice_vsi_ctx *vsi; 4631 struct ice_q_ctx *q_ctx; 4632 4633 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4634 if (!vsi) 4635 return NULL; 4636 if (q_handle >= vsi->num_lan_q_entries[tc]) 4637 return NULL; 4638 if (!vsi->lan_q_ctx[tc]) 4639 return NULL; 4640 q_ctx = vsi->lan_q_ctx[tc]; 4641 return &q_ctx[q_handle]; 4642 } 4643 4644 /** 4645 * ice_ena_vsi_txq 4646 * @pi: port information structure 4647 * @vsi_handle: software VSI handle 4648 * @tc: TC number 4649 * @q_handle: software queue handle 4650 * @num_qgrps: Number of added queue groups 4651 * @buf: list of queue groups to be added 4652 * @buf_size: size of buffer for indirect command 4653 * @cd: pointer to command details structure or NULL 4654 * 4655 * This function adds one LAN queue 4656 */ 4657 int 4658 ice_ena_vsi_txq(struct 
ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4659 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4660 struct ice_sq_cd *cd) 4661 { 4662 struct ice_aqc_txsched_elem_data node = { 0 }; 4663 struct ice_sched_node *parent; 4664 struct ice_q_ctx *q_ctx; 4665 struct ice_hw *hw; 4666 int status; 4667 4668 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4669 return -EIO; 4670 4671 if (num_qgrps > 1 || buf->num_txqs > 1) 4672 return -ENOSPC; 4673 4674 hw = pi->hw; 4675 4676 if (!ice_is_vsi_valid(hw, vsi_handle)) 4677 return -EINVAL; 4678 4679 mutex_lock(&pi->sched_lock); 4680 4681 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4682 if (!q_ctx) { 4683 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4684 q_handle); 4685 status = -EINVAL; 4686 goto ena_txq_exit; 4687 } 4688 4689 /* find a parent node */ 4690 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4691 ICE_SCHED_NODE_OWNER_LAN); 4692 if (!parent) { 4693 status = -EINVAL; 4694 goto ena_txq_exit; 4695 } 4696 4697 buf->parent_teid = parent->info.node_teid; 4698 node.parent_teid = parent->info.node_teid; 4699 /* Mark the values in the "generic" section as valid. The default 4700 * value in the "generic" section is zero. This means that: 4701 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4702 * - 0 priority among siblings, indicated by Bits 1-3. 4703 * - WFQ, indicated by Bit 4. 4704 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4705 * Bits 5-6. 4706 * - Bit 7 is reserved. 4707 * Without setting the generic section as valid in valid_sections, the 4708 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 4709 */ 4710 buf->txqs[0].info.valid_sections = 4711 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4712 ICE_AQC_ELEM_VALID_EIR; 4713 buf->txqs[0].info.generic = 0; 4714 buf->txqs[0].info.cir_bw.bw_profile_idx = 4715 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4716 buf->txqs[0].info.cir_bw.bw_alloc = 4717 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4718 buf->txqs[0].info.eir_bw.bw_profile_idx = 4719 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4720 buf->txqs[0].info.eir_bw.bw_alloc = 4721 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4722 4723 /* add the LAN queue */ 4724 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4725 if (status) { 4726 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4727 le16_to_cpu(buf->txqs[0].txq_id), 4728 hw->adminq.sq_last_status); 4729 goto ena_txq_exit; 4730 } 4731 4732 node.node_teid = buf->txqs[0].q_teid; 4733 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4734 q_ctx->q_handle = q_handle; 4735 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4736 4737 /* add a leaf node into scheduler tree queue layer */ 4738 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4739 if (!status) 4740 status = ice_sched_replay_q_bw(pi, q_ctx); 4741 4742 ena_txq_exit: 4743 mutex_unlock(&pi->sched_lock); 4744 return status; 4745 } 4746 4747 /** 4748 * ice_dis_vsi_txq 4749 * @pi: port information structure 4750 * @vsi_handle: software VSI handle 4751 * @tc: TC number 4752 * @num_queues: number of queues 4753 * @q_handles: pointer to software queue handle array 4754 * @q_ids: pointer to the q_id array 4755 * @q_teids: pointer to queue node teids 4756 * @rst_src: if called due to reset, specifies the reset source 4757 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4758 * @cd: pointer to command details structure or NULL 4759 * 4760 * This function removes queues and
their corresponding nodes in SW DB 4761 */ 4762 int 4763 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4764 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4765 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4766 struct ice_sq_cd *cd) 4767 { 4768 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4769 u16 i, buf_size = __struct_size(qg_list); 4770 struct ice_q_ctx *q_ctx; 4771 int status = -ENOENT; 4772 struct ice_hw *hw; 4773 4774 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4775 return -EIO; 4776 4777 hw = pi->hw; 4778 4779 if (!num_queues) { 4780 /* if queue is disabled already yet the disable queue command 4781 * has to be sent to complete the VF reset, then call 4782 * ice_aq_dis_lan_txq without any queue information 4783 */ 4784 if (rst_src) 4785 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4786 vmvf_num, NULL); 4787 return -EIO; 4788 } 4789 4790 mutex_lock(&pi->sched_lock); 4791 4792 for (i = 0; i < num_queues; i++) { 4793 struct ice_sched_node *node; 4794 4795 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4796 if (!node) 4797 continue; 4798 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4799 if (!q_ctx) { 4800 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4801 q_handles[i]); 4802 continue; 4803 } 4804 if (q_ctx->q_handle != q_handles[i]) { 4805 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4806 q_ctx->q_handle, q_handles[i]); 4807 continue; 4808 } 4809 qg_list->parent_teid = node->info.parent_teid; 4810 qg_list->num_qs = 1; 4811 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4812 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4813 vmvf_num, cd); 4814 4815 if (status) 4816 break; 4817 ice_free_sched_node(pi, node); 4818 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4819 q_ctx->q_teid = ICE_INVAL_TEID; 4820 } 4821 mutex_unlock(&pi->sched_lock); 4822 return status; 4823 } 4824 4825 /** 4826 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4827 * @pi: port information structure 4828 * @vsi_handle: software VSI handle 4829 * @tc_bitmap: TC bitmap 4830 * @maxqs: max queues array per TC 4831 * @owner: LAN or RDMA 4832 * 4833 * This function adds/updates the VSI queues per TC. 4834 */ 4835 static int 4836 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4837 u16 *maxqs, u8 owner) 4838 { 4839 int status = 0; 4840 u8 i; 4841 4842 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4843 return -EIO; 4844 4845 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4846 return -EINVAL; 4847 4848 mutex_lock(&pi->sched_lock); 4849 4850 ice_for_each_traffic_class(i) { 4851 /* configuration is possible only if TC node is present */ 4852 if (!ice_sched_get_tc_node(pi, i)) 4853 continue; 4854 4855 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4856 ice_is_tc_ena(tc_bitmap, i)); 4857 if (status) 4858 break; 4859 } 4860 4861 mutex_unlock(&pi->sched_lock); 4862 return status; 4863 } 4864 4865 /** 4866 * ice_cfg_vsi_lan - configure VSI LAN queues 4867 * @pi: port information structure 4868 * @vsi_handle: software VSI handle 4869 * @tc_bitmap: TC bitmap 4870 * @max_lanqs: max LAN queues array per TC 4871 * 4872 * This function adds/updates the VSI LAN queues per TC. 
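 *
 * A minimal sketch enabling 16 LAN queues on TC 0 only (the counts and
 * bitmap are arbitrary examples):
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };
 *	int err;
 *
 *	err = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);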
4873 */ 4874 int 4875 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4876 u16 *max_lanqs) 4877 { 4878 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4879 ICE_SCHED_NODE_OWNER_LAN); 4880 } 4881 4882 /** 4883 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4884 * @pi: port information structure 4885 * @vsi_handle: software VSI handle 4886 * @tc_bitmap: TC bitmap 4887 * @max_rdmaqs: max RDMA queues array per TC 4888 * 4889 * This function adds/updates the VSI RDMA queues per TC. 4890 */ 4891 int 4892 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4893 u16 *max_rdmaqs) 4894 { 4895 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4896 ICE_SCHED_NODE_OWNER_RDMA); 4897 } 4898 4899 /** 4900 * ice_ena_vsi_rdma_qset 4901 * @pi: port information structure 4902 * @vsi_handle: software VSI handle 4903 * @tc: TC number 4904 * @rdma_qset: pointer to RDMA Qset 4905 * @num_qsets: number of RDMA Qsets 4906 * @qset_teid: pointer to Qset node TEIDs 4907 * 4908 * This function adds RDMA Qset 4909 */ 4910 int 4911 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4912 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4913 { 4914 struct ice_aqc_txsched_elem_data node = { 0 }; 4915 struct ice_aqc_add_rdma_qset_data *buf; 4916 struct ice_sched_node *parent; 4917 struct ice_hw *hw; 4918 u16 i, buf_size; 4919 int ret; 4920 4921 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4922 return -EIO; 4923 hw = pi->hw; 4924 4925 if (!ice_is_vsi_valid(hw, vsi_handle)) 4926 return -EINVAL; 4927 4928 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4929 buf = kzalloc(buf_size, GFP_KERNEL); 4930 if (!buf) 4931 return -ENOMEM; 4932 mutex_lock(&pi->sched_lock); 4933 4934 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4935 ICE_SCHED_NODE_OWNER_RDMA); 4936 if (!parent) { 4937 ret = -EINVAL; 4938 goto rdma_error_exit; 4939 } 4940 buf->parent_teid = parent->info.node_teid; 4941 node.parent_teid = parent->info.node_teid; 4942 4943 buf->num_qsets = cpu_to_le16(num_qsets); 4944 for (i = 0; i < num_qsets; i++) { 4945 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4946 buf->rdma_qsets[i].info.valid_sections = 4947 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4948 ICE_AQC_ELEM_VALID_EIR; 4949 buf->rdma_qsets[i].info.generic = 0; 4950 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4951 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4952 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4953 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4954 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4955 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4956 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4957 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4958 } 4959 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4960 if (ret) { 4961 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4962 goto rdma_error_exit; 4963 } 4964 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4965 for (i = 0; i < num_qsets; i++) { 4966 node.node_teid = buf->rdma_qsets[i].qset_teid; 4967 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4968 &node, NULL); 4969 if (ret) 4970 break; 4971 qset_teid[i] = le32_to_cpu(node.node_teid); 4972 } 4973 rdma_error_exit: 4974 mutex_unlock(&pi->sched_lock); 4975 kfree(buf); 4976 return ret; 4977 } 4978 4979 /** 4980 * ice_dis_vsi_rdma_qset - free RDMA resources 4981 * @pi: port_info struct 4982 * @count: number of RDMA Qsets to free 4983 * @qset_teid: TEID of Qset node 4984 * @q_id: list of queue IDs being disabled 4985 */ 4986 int 4987 
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4988 u16 *q_id) 4989 { 4990 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4991 u16 qg_size = __struct_size(qg_list); 4992 struct ice_hw *hw; 4993 int status = 0; 4994 int i; 4995 4996 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4997 return -EIO; 4998 4999 hw = pi->hw; 5000 5001 mutex_lock(&pi->sched_lock); 5002 5003 for (i = 0; i < count; i++) { 5004 struct ice_sched_node *node; 5005 5006 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 5007 if (!node) 5008 continue; 5009 5010 qg_list->parent_teid = node->info.parent_teid; 5011 qg_list->num_qs = 1; 5012 qg_list->q_id[0] = 5013 cpu_to_le16(q_id[i] | 5014 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 5015 5016 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 5017 ICE_NO_RESET, 0, NULL); 5018 if (status) 5019 break; 5020 5021 ice_free_sched_node(pi, node); 5022 } 5023 5024 mutex_unlock(&pi->sched_lock); 5025 return status; 5026 } 5027 5028 /** 5029 * ice_aq_get_cgu_abilities - get cgu abilities 5030 * @hw: pointer to the HW struct 5031 * @abilities: CGU abilities 5032 * 5033 * Get CGU abilities (0x0C61) 5034 * Return: 0 on success or negative value on failure. 5035 */ 5036 int 5037 ice_aq_get_cgu_abilities(struct ice_hw *hw, 5038 struct ice_aqc_get_cgu_abilities *abilities) 5039 { 5040 struct ice_aq_desc desc; 5041 5042 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 5043 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 5044 } 5045 5046 /** 5047 * ice_aq_set_input_pin_cfg - set input pin config 5048 * @hw: pointer to the HW struct 5049 * @input_idx: Input index 5050 * @flags1: Input flags 5051 * @flags2: Input flags 5052 * @freq: Frequency in Hz 5053 * @phase_delay: Delay in ps 5054 * 5055 * Set CGU input config (0x0C62) 5056 * Return: 0 on success or negative value on failure. 5057 */ 5058 int 5059 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 5060 u32 freq, s32 phase_delay) 5061 { 5062 struct ice_aqc_set_cgu_input_config *cmd; 5063 struct ice_aq_desc desc; 5064 5065 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 5066 cmd = &desc.params.set_cgu_input_config; 5067 cmd->input_idx = input_idx; 5068 cmd->flags1 = flags1; 5069 cmd->flags2 = flags2; 5070 cmd->freq = cpu_to_le32(freq); 5071 cmd->phase_delay = cpu_to_le32(phase_delay); 5072 5073 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5074 } 5075 5076 /** 5077 * ice_aq_get_input_pin_cfg - get input pin config 5078 * @hw: pointer to the HW struct 5079 * @input_idx: Input index 5080 * @status: Pin status 5081 * @type: Pin type 5082 * @flags1: Input flags 5083 * @flags2: Input flags 5084 * @freq: Frequency in Hz 5085 * @phase_delay: Delay in ps 5086 * 5087 * Get CGU input config (0x0C63) 5088 * Return: 0 on success or negative value on failure. 
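 *
 * All output pointers are optional; e.g. to read only the configured
 * frequency of input pin 0 (sketch):
 *
 *	u32 freq;
 *	int err;
 *
 *	err = ice_aq_get_input_pin_cfg(hw, 0, NULL, NULL, NULL, NULL,
 *				       &freq, NULL);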
5089 */ 5090 int 5091 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5092 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5093 { 5094 struct ice_aqc_get_cgu_input_config *cmd; 5095 struct ice_aq_desc desc; 5096 int ret; 5097 5098 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5099 cmd = &desc.params.get_cgu_input_config; 5100 cmd->input_idx = input_idx; 5101 5102 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5103 if (!ret) { 5104 if (status) 5105 *status = cmd->status; 5106 if (type) 5107 *type = cmd->type; 5108 if (flags1) 5109 *flags1 = cmd->flags1; 5110 if (flags2) 5111 *flags2 = cmd->flags2; 5112 if (freq) 5113 *freq = le32_to_cpu(cmd->freq); 5114 if (phase_delay) 5115 *phase_delay = le32_to_cpu(cmd->phase_delay); 5116 } 5117 5118 return ret; 5119 } 5120 5121 /** 5122 * ice_aq_set_output_pin_cfg - set output pin config 5123 * @hw: pointer to the HW struct 5124 * @output_idx: Output index 5125 * @flags: Output flags 5126 * @src_sel: Index of DPLL block 5127 * @freq: Output frequency 5128 * @phase_delay: Output phase compensation 5129 * 5130 * Set CGU output config (0x0C64) 5131 * Return: 0 on success or negative value on failure. 5132 */ 5133 int 5134 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5135 u8 src_sel, u32 freq, s32 phase_delay) 5136 { 5137 struct ice_aqc_set_cgu_output_config *cmd; 5138 struct ice_aq_desc desc; 5139 5140 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5141 cmd = &desc.params.set_cgu_output_config; 5142 cmd->output_idx = output_idx; 5143 cmd->flags = flags; 5144 cmd->src_sel = src_sel; 5145 cmd->freq = cpu_to_le32(freq); 5146 cmd->phase_delay = cpu_to_le32(phase_delay); 5147 5148 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5149 } 5150 5151 /** 5152 * ice_aq_get_output_pin_cfg - get output pin config 5153 * @hw: pointer to the HW struct 5154 * @output_idx: Output index 5155 * @flags: Output flags 5156 * @src_sel: Internal DPLL source 5157 * @freq: Output frequency 5158 * @src_freq: Source frequency 5159 * 5160 * Get CGU output config (0x0C65) 5161 * Return: 0 on success or negative value on failure. 5162 */ 5163 int 5164 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5165 u8 *src_sel, u32 *freq, u32 *src_freq) 5166 { 5167 struct ice_aqc_get_cgu_output_config *cmd; 5168 struct ice_aq_desc desc; 5169 int ret; 5170 5171 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5172 cmd = &desc.params.get_cgu_output_config; 5173 cmd->output_idx = output_idx; 5174 5175 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5176 if (!ret) { 5177 if (flags) 5178 *flags = cmd->flags; 5179 if (src_sel) 5180 *src_sel = cmd->src_sel; 5181 if (freq) 5182 *freq = le32_to_cpu(cmd->freq); 5183 if (src_freq) 5184 *src_freq = le32_to_cpu(cmd->src_freq); 5185 } 5186 5187 return ret; 5188 } 5189 5190 /** 5191 * ice_aq_get_cgu_dpll_status - get dpll status 5192 * @hw: pointer to the HW struct 5193 * @dpll_num: DPLL index 5194 * @ref_state: Reference clock state 5195 * @config: current DPLL config 5196 * @dpll_state: current DPLL state 5197 * @phase_offset: Phase offset in ns 5198 * @eec_mode: EEC_mode 5199 * 5200 * Get CGU DPLL status (0x0C66) 5201 * Return: 0 on success or negative value on failure. 
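 *
 * Unlike the pin getters above, every output pointer is written
 * unconditionally on success, so none may be NULL. Sketch for DPLL 0:
 *
 *	u8 ref_state, dpll_state, config, eec_mode;
 *	s64 phase_offset;
 *	int err;
 *
 *	err = ice_aq_get_cgu_dpll_status(hw, 0, &ref_state, &dpll_state,
 *					 &config, &phase_offset, &eec_mode);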
5202 */ 5203 int 5204 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5205 u8 *dpll_state, u8 *config, s64 *phase_offset, 5206 u8 *eec_mode) 5207 { 5208 struct ice_aqc_get_cgu_dpll_status *cmd; 5209 struct ice_aq_desc desc; 5210 int status; 5211 5212 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5213 cmd = &desc.params.get_cgu_dpll_status; 5214 cmd->dpll_num = dpll_num; 5215 5216 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5217 if (!status) { 5218 *ref_state = cmd->ref_state; 5219 *dpll_state = cmd->dpll_state; 5220 *config = cmd->config; 5221 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5222 *phase_offset <<= 32; 5223 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5224 *phase_offset = sign_extend64(*phase_offset, 47); 5225 *eec_mode = cmd->eec_mode; 5226 } 5227 5228 return status; 5229 } 5230 5231 /** 5232 * ice_aq_set_cgu_dpll_config - set dpll config 5233 * @hw: pointer to the HW struct 5234 * @dpll_num: DPLL index 5235 * @ref_state: Reference clock state 5236 * @config: DPLL config 5237 * @eec_mode: EEC mode 5238 * 5239 * Set CGU DPLL config (0x0C67) 5240 * Return: 0 on success or negative value on failure. 5241 */ 5242 int 5243 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5244 u8 config, u8 eec_mode) 5245 { 5246 struct ice_aqc_set_cgu_dpll_config *cmd; 5247 struct ice_aq_desc desc; 5248 5249 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5250 cmd = &desc.params.set_cgu_dpll_config; 5251 cmd->dpll_num = dpll_num; 5252 cmd->ref_state = ref_state; 5253 cmd->config = config; 5254 cmd->eec_mode = eec_mode; 5255 5256 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5257 } 5258 5259 /** 5260 * ice_aq_set_cgu_ref_prio - set input reference priority 5261 * @hw: pointer to the HW struct 5262 * @dpll_num: DPLL index 5263 * @ref_idx: Reference pin index 5264 * @ref_priority: Reference input priority 5265 * 5266 * Set CGU reference priority (0x0C68) 5267 * Return: 0 on success or negative value on failure. 5268 */ 5269 int 5270 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5271 u8 ref_priority) 5272 { 5273 struct ice_aqc_set_cgu_ref_prio *cmd; 5274 struct ice_aq_desc desc; 5275 5276 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5277 cmd = &desc.params.set_cgu_ref_prio; 5278 cmd->dpll_num = dpll_num; 5279 cmd->ref_idx = ref_idx; 5280 cmd->ref_priority = ref_priority; 5281 5282 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5283 } 5284 5285 /** 5286 * ice_aq_get_cgu_ref_prio - get input reference priority 5287 * @hw: pointer to the HW struct 5288 * @dpll_num: DPLL index 5289 * @ref_idx: Reference pin index 5290 * @ref_prio: Reference input priority 5291 * 5292 * Get CGU reference priority (0x0C69) 5293 * Return: 0 on success or negative value on failure. 
5294 */ 5295 int 5296 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5297 u8 *ref_prio) 5298 { 5299 struct ice_aqc_get_cgu_ref_prio *cmd; 5300 struct ice_aq_desc desc; 5301 int status; 5302 5303 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); 5304 cmd = &desc.params.get_cgu_ref_prio; 5305 cmd->dpll_num = dpll_num; 5306 cmd->ref_idx = ref_idx; 5307 5308 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5309 if (!status) 5310 *ref_prio = cmd->ref_priority; 5311 5312 return status; 5313 } 5314 5315 /** 5316 * ice_aq_get_cgu_info - get cgu info 5317 * @hw: pointer to the HW struct 5318 * @cgu_id: CGU ID 5319 * @cgu_cfg_ver: CGU config version 5320 * @cgu_fw_ver: CGU firmware version 5321 * 5322 * Get CGU info (0x0C6A) 5323 * Return: 0 on success or negative value on failure. 5324 */ 5325 int 5326 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, 5327 u32 *cgu_fw_ver) 5328 { 5329 struct ice_aqc_get_cgu_info *cmd; 5330 struct ice_aq_desc desc; 5331 int status; 5332 5333 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); 5334 cmd = &desc.params.get_cgu_info; 5335 5336 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5337 if (!status) { 5338 *cgu_id = le32_to_cpu(cmd->cgu_id); 5339 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); 5340 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); 5341 } 5342 5343 return status; 5344 } 5345 5346 /** 5347 * ice_aq_set_phy_rec_clk_out - set RCLK phy out 5348 * @hw: pointer to the HW struct 5349 * @phy_output: PHY reference clock output pin 5350 * @enable: GPIO state to be applied 5351 * @freq: PHY output frequency 5352 * 5353 * Set phy recovered clock as reference (0x0630) 5354 * Return: 0 on success or negative value on failure. 5355 */ 5356 int 5357 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, 5358 u32 *freq) 5359 { 5360 struct ice_aqc_set_phy_rec_clk_out *cmd; 5361 struct ice_aq_desc desc; 5362 int status; 5363 5364 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); 5365 cmd = &desc.params.set_phy_rec_clk_out; 5366 cmd->phy_output = phy_output; 5367 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; 5368 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; 5369 cmd->freq = cpu_to_le32(*freq); 5370 5371 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5372 if (!status) 5373 *freq = le32_to_cpu(cmd->freq); 5374 5375 return status; 5376 } 5377 5378 /** 5379 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info 5380 * @hw: pointer to the HW struct 5381 * @phy_output: PHY reference clock output pin 5382 * @port_num: Port number 5383 * @flags: PHY flags 5384 * @node_handle: PHY node handle 5385 * 5386 * Get PHY recovered clock output info (0x0631) 5387 * Return: 0 on success or negative value on failure.
5388 */ 5389 int 5390 ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, 5391 u8 *flags, u16 *node_handle) 5392 { 5393 struct ice_aqc_get_phy_rec_clk_out *cmd; 5394 struct ice_aq_desc desc; 5395 int status; 5396 5397 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); 5398 cmd = &desc.params.get_phy_rec_clk_out; 5399 cmd->phy_output = *phy_output; 5400 5401 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5402 if (!status) { 5403 *phy_output = cmd->phy_output; 5404 if (port_num) 5405 *port_num = cmd->port_num; 5406 if (flags) 5407 *flags = cmd->flags; 5408 if (node_handle) 5409 *node_handle = le16_to_cpu(cmd->node_handle); 5410 } 5411 5412 return status; 5413 } 5414 5415 /** 5416 * ice_aq_get_sensor_reading 5417 * @hw: pointer to the HW struct 5418 * @data: pointer to data to be read from the sensor 5419 * 5420 * Get sensor reading (0x0632) 5421 */ 5422 int ice_aq_get_sensor_reading(struct ice_hw *hw, 5423 struct ice_aqc_get_sensor_reading_resp *data) 5424 { 5425 struct ice_aqc_get_sensor_reading *cmd; 5426 struct ice_aq_desc desc; 5427 int status; 5428 5429 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); 5430 cmd = &desc.params.get_sensor_reading; 5431 #define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 5432 #define ICE_INTERNAL_TEMP_SENSOR 0 5433 cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; 5434 cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; 5435 5436 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5437 if (!status) 5438 memcpy(data, &desc.params.get_sensor_reading_resp, 5439 sizeof(*data)); 5440 5441 return status; 5442 } 5443 5444 /** 5445 * ice_replay_pre_init - replay pre initialization 5446 * @hw: pointer to the HW struct 5447 * 5448 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 5449 */ 5450 static int ice_replay_pre_init(struct ice_hw *hw) 5451 { 5452 struct ice_switch_info *sw = hw->switch_info; 5453 u8 i; 5454 5455 /* Delete old entries from replay filter list head if there is any */ 5456 ice_rm_all_sw_replay_rule_info(hw); 5457 /* In start of replay, move entries into replay_rules list, it 5458 * will allow adding rules entries back to filt_rules list, 5459 * which is operational list. 5460 */ 5461 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) 5462 list_replace_init(&sw->recp_list[i].filt_rules, 5463 &sw->recp_list[i].filt_replay_rules); 5464 ice_sched_replay_agg_vsi_preinit(hw); 5465 5466 return 0; 5467 } 5468 5469 /** 5470 * ice_replay_vsi - replay VSI configuration 5471 * @hw: pointer to the HW struct 5472 * @vsi_handle: driver VSI handle 5473 * 5474 * Restore all VSI configuration after reset. It is required to call this 5475 * function with main VSI first. 5476 */ 5477 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 5478 { 5479 int status; 5480 5481 if (!ice_is_vsi_valid(hw, vsi_handle)) 5482 return -EINVAL; 5483 5484 /* Replay pre-initialization if there is any */ 5485 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 5486 status = ice_replay_pre_init(hw); 5487 if (status) 5488 return status; 5489 } 5490 /* Replay per VSI all RSS configurations */ 5491 status = ice_replay_rss_cfg(hw, vsi_handle); 5492 if (status) 5493 return status; 5494 /* Replay per VSI all filters */ 5495 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 5496 if (!status) 5497 status = ice_replay_vsi_agg(hw, vsi_handle); 5498 return status; 5499 } 5500 5501 /** 5502 * ice_replay_post - post replay configuration cleanup 5503 * @hw: pointer to the HW struct 5504 * 5505 * Post replay cleanup. 
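 *
 * Expected ordering after a reset (sketch; iterating the remaining VSIs
 * is the caller's job, and the main VSI must be replayed first as noted
 * above):
 *
 *	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	...replay the remaining VSIs...
 *	ice_replay_post(hw);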
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* Device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
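
/* Worked example of the 40-bit rollover handling above: if the previous
 * read was 0xFFFFFFFFF0 and the masked counter has since wrapped to 0x10,
 * then new_data < *prev_stat and the accumulated delta is
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, i.e. 32 events.
 */
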
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* Device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information.
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read data from an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
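
/* Illustrative @params encoding for the read above (a sketch; the 2-byte
 * read size is an assumption of this example): pack the data size into
 * bits [3:0], the same field the function decodes with FIELD_GET(); the
 * remaining bits (repeated start, offset size, address type) can be ORed
 * in as needed:
 *
 *	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);
 */
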
/**
 * ice_aq_write_i2c - write data to an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *	    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the I2C device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology.
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology.
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver - check if the firmware API meets a minimum version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least maj.min.patch.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link default override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override.
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
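
/* Typical gating pattern (a sketch; 'pi' is assumed to be a valid
 * port_info pointer): only attempt the override read when the running
 * firmware advertises support:
 *
 *	if (ice_fw_supports_link_override(hw)) {
 *		struct ice_link_default_override_tlv ldo = {};
 *
 *		if (!ice_get_link_default_override(&ldo, pi))
 *			... apply ldo.options and ldo.fec_options ...
 *	}
 */
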
/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port.
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 *
 * Return: true if autoneg is enabled, false otherwise.
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_is_fw_health_report_supported - checks if firmware supports health events
 * @hw: pointer to the hardware structure
 *
 * Return: true if firmware supports health status reports,
 * false otherwise
 */
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
				     ICE_FW_API_HEALTH_REPORT_MIN,
				     ICE_FW_API_HEALTH_REPORT_PATCH);
}
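
/* Sketch of the expected enable sequence (illustrative; the PF-specific
 * event-source mask name used here is an assumption of this example):
 * gate configuration on the support check above:
 *
 *	if (ice_is_fw_health_report_supported(hw))
 *		ice_aq_set_health_status_cfg(hw,
 *					     ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK);
 */
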
/**
 * ice_aq_set_health_status_cfg - Configure FW health events
 * @hw: pointer to the HW struct
 * @event_source: type of diagnostic events to enable
 *
 * Configure the health status event types that the firmware will send to this
 * PF. The supported event types are: PF-specific, all PFs, and global.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
{
	struct ice_aqc_set_health_status_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_health_status_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);

	cmd->event_source = event_source;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW API version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration.
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
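
/* Sketch: add the LLDP Rx filter only when firmware supports it
 * (illustrative; 'vsi_num' is assumed to be a valid absolute VSI index):
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
 *		int err = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 *
 *		if (err)
 *			ice_debug(hw, ICE_DBG_INIT, "LLDP filter add failed\n");
 *	}
 */
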
/* Each index into the following array matches the speed of a return value
 * from the list of AQ returned speeds, in the range
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_200GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15). The link_speed returned by
 * the firmware is a 16 bit value, and the array is indexed by
 * [fls(speed) - 1]; out-of-range indexes (including the one for
 * ICE_AQ_LINK_SPEED_UNKNOWN) are rejected by ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Return: u32 value containing integer speed, or 0 for an out-of-range index.
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
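
/* Worked example of the [fls(speed) - 1] indexing above (illustrative):
 * given the consecutive BIT(i) mapping annotated in the array,
 * ICE_AQ_LINK_SPEED_25GB is BIT(7), fls() returns 8, and so
 * ice_get_link_speed(fls(ICE_AQ_LINK_SPEED_25GB) - 1) ==
 * ice_get_link_speed(7) == SPEED_25000.
 */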