// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
#include <linux/packing.h>

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
        [0] = "100BASE_TX",
        [1] = "100M_SGMII",
        [2] = "1000BASE_T",
        [3] = "1000BASE_SX",
        [4] = "1000BASE_LX",
        [5] = "1000BASE_KX",
        [6] = "1G_SGMII",
        [7] = "2500BASE_T",
        [8] = "2500BASE_X",
        [9] = "2500BASE_KX",
        [10] = "5GBASE_T",
        [11] = "5GBASE_KR",
        [12] = "10GBASE_T",
        [13] = "10G_SFI_DA",
        [14] = "10GBASE_SR",
        [15] = "10GBASE_LR",
        [16] = "10GBASE_KR_CR1",
        [17] = "10G_SFI_AOC_ACC",
        [18] = "10G_SFI_C2C",
        [19] = "25GBASE_T",
        [20] = "25GBASE_CR",
        [21] = "25GBASE_CR_S",
        [22] = "25GBASE_CR1",
        [23] = "25GBASE_SR",
        [24] = "25GBASE_LR",
        [25] = "25GBASE_KR",
        [26] = "25GBASE_KR_S",
        [27] = "25GBASE_KR1",
        [28] = "25G_AUI_AOC_ACC",
        [29] = "25G_AUI_C2C",
        [30] = "40GBASE_CR4",
        [31] = "40GBASE_SR4",
        [32] = "40GBASE_LR4",
        [33] = "40GBASE_KR4",
        [34] = "40G_XLAUI_AOC_ACC",
        [35] = "40G_XLAUI",
        [36] = "50GBASE_CR2",
        [37] = "50GBASE_SR2",
        [38] = "50GBASE_LR2",
        [39] = "50GBASE_KR2",
        [40] = "50G_LAUI2_AOC_ACC",
        [41] = "50G_LAUI2",
        [42] = "50G_AUI2_AOC_ACC",
        [43] = "50G_AUI2",
        [44] = "50GBASE_CP",
        [45] = "50GBASE_SR",
        [46] = "50GBASE_FR",
        [47] = "50GBASE_LR",
        [48] = "50GBASE_KR_PAM4",
        [49] = "50G_AUI1_AOC_ACC",
        [50] = "50G_AUI1",
        [51] = "100GBASE_CR4",
        [52] = "100GBASE_SR4",
        [53] = "100GBASE_LR4",
        [54] = "100GBASE_KR4",
        [55] = "100G_CAUI4_AOC_ACC",
        [56] = "100G_CAUI4",
        [57] = "100G_AUI4_AOC_ACC",
        [58] = "100G_AUI4",
        [59] = "100GBASE_CR_PAM4",
        [60] = "100GBASE_KR_PAM4",
        [61] = "100GBASE_CP2",
        [62] = "100GBASE_SR2",
        [63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
        [0] = "100GBASE_KR2_PAM4",
        [1] = "100G_CAUI2_AOC_ACC",
        [2] = "100G_CAUI2",
        [3] = "100G_AUI2_AOC_ACC",
        [4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
        ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

        for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
                if (low & BIT_ULL(i))
                        ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
                                  prefix, i, ice_link_mode_str_low[i]);
        }

        ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

        for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
                if (high & BIT_ULL(i))
                        ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
                                  prefix, i, ice_link_mode_str_high[i]);
        }
}
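
/* Illustrative example (not part of the original file): with the string
 * tables above, a phy_type_low of BIT_ULL(2) | BIT_ULL(12) and ICE_DBG_PHY
 * enabled, ice_dump_phy_type() would emit:
 *
 *	caller: phy_type_low: 0x0000000000001004
 *	caller: bit(2): 1000BASE_T
 *	caller: bit(12): 10GBASE_T
 */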

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
        if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
                return -ENODEV;

        switch (hw->device_id) {
        case ICE_DEV_ID_E810C_BACKPLANE:
        case ICE_DEV_ID_E810C_QSFP:
        case ICE_DEV_ID_E810C_SFP:
        case ICE_DEV_ID_E810_XXV_BACKPLANE:
        case ICE_DEV_ID_E810_XXV_QSFP:
        case ICE_DEV_ID_E810_XXV_SFP:
                hw->mac_type = ICE_MAC_E810;
                break;
        case ICE_DEV_ID_E823C_10G_BASE_T:
        case ICE_DEV_ID_E823C_BACKPLANE:
        case ICE_DEV_ID_E823C_QSFP:
        case ICE_DEV_ID_E823C_SFP:
        case ICE_DEV_ID_E823C_SGMII:
        case ICE_DEV_ID_E822C_10G_BASE_T:
        case ICE_DEV_ID_E822C_BACKPLANE:
        case ICE_DEV_ID_E822C_QSFP:
        case ICE_DEV_ID_E822C_SFP:
        case ICE_DEV_ID_E822C_SGMII:
        case ICE_DEV_ID_E822L_10G_BASE_T:
        case ICE_DEV_ID_E822L_BACKPLANE:
        case ICE_DEV_ID_E822L_SFP:
        case ICE_DEV_ID_E822L_SGMII:
        case ICE_DEV_ID_E823L_10G_BASE_T:
        case ICE_DEV_ID_E823L_1GBE:
        case ICE_DEV_ID_E823L_BACKPLANE:
        case ICE_DEV_ID_E823L_QSFP:
        case ICE_DEV_ID_E823L_SFP:
                hw->mac_type = ICE_MAC_GENERIC;
                break;
        case ICE_DEV_ID_E825C_BACKPLANE:
        case ICE_DEV_ID_E825C_QSFP:
        case ICE_DEV_ID_E825C_SFP:
        case ICE_DEV_ID_E825C_SGMII:
                hw->mac_type = ICE_MAC_GENERIC_3K_E825;
                break;
        case ICE_DEV_ID_E830CC_BACKPLANE:
        case ICE_DEV_ID_E830CC_QSFP56:
        case ICE_DEV_ID_E830CC_SFP:
        case ICE_DEV_ID_E830CC_SFP_DD:
        case ICE_DEV_ID_E830C_BACKPLANE:
        case ICE_DEV_ID_E830_XXV_BACKPLANE:
        case ICE_DEV_ID_E830C_QSFP:
        case ICE_DEV_ID_E830_XXV_QSFP:
        case ICE_DEV_ID_E830C_SFP:
        case ICE_DEV_ID_E830_XXV_SFP:
                hw->mac_type = ICE_MAC_E830;
                break;
        default:
                hw->mac_type = ICE_MAC_UNKNOWN;
                break;
        }

        ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
        return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
        return (hw->mac_type == ICE_MAC_GENERIC ||
                hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
        return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
        switch (hw->device_id) {
        case ICE_DEV_ID_E810C_SFP:
                switch (hw->subsystem_device_id) {
                case ICE_SUBDEV_ID_E810T:
                case ICE_SUBDEV_ID_E810T2:
                case ICE_SUBDEV_ID_E810T3:
                case ICE_SUBDEV_ID_E810T4:
                case ICE_SUBDEV_ID_E810T6:
                case ICE_SUBDEV_ID_E810T7:
                        return true;
                }
                break;
        case ICE_DEV_ID_E810C_QSFP:
                switch (hw->subsystem_device_id) {
                case ICE_SUBDEV_ID_E810T2:
                case ICE_SUBDEV_ID_E810T3:
                case ICE_SUBDEV_ID_E810T5:
                        return true;
                }
                break;
        default:
                break;
        }

        return false;
}

/**
 * ice_is_e822 - Check if a device is E822 family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E822 based, false if not.
 */
bool ice_is_e822(struct ice_hw *hw)
{
        switch (hw->device_id) {
        case ICE_DEV_ID_E822C_BACKPLANE:
        case ICE_DEV_ID_E822C_QSFP:
        case ICE_DEV_ID_E822C_SFP:
        case ICE_DEV_ID_E822C_10G_BASE_T:
        case ICE_DEV_ID_E822C_SGMII:
        case ICE_DEV_ID_E822L_BACKPLANE:
        case ICE_DEV_ID_E822L_SFP:
        case ICE_DEV_ID_E822L_10G_BASE_T:
        case ICE_DEV_ID_E822L_SGMII:
                return true;
        default:
                return false;
        }
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
        switch (hw->device_id) {
        case ICE_DEV_ID_E823L_BACKPLANE:
        case ICE_DEV_ID_E823L_SFP:
        case ICE_DEV_ID_E823L_10G_BASE_T:
        case ICE_DEV_ID_E823L_1GBE:
        case ICE_DEV_ID_E823L_QSFP:
        case ICE_DEV_ID_E823C_BACKPLANE:
        case ICE_DEV_ID_E823C_QSFP:
        case ICE_DEV_ID_E823C_SFP:
        case ICE_DEV_ID_E823C_10G_BASE_T:
        case ICE_DEV_ID_E823C_SGMII:
                return true;
        default:
                return false;
        }
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
        switch (hw->device_id) {
        case ICE_DEV_ID_E825C_BACKPLANE:
        case ICE_DEV_ID_E825C_QSFP:
        case ICE_DEV_ID_E825C_SFP:
        case ICE_DEV_ID_E825C_SGMII:
                return true;
        default:
                return false;
        }
}

/**
 * ice_is_pf_c827 - check if pf contains c827 phy
 * @hw: pointer to the hw struct
 *
 * Return: true if the device has c827 phy.
 */
static bool ice_is_pf_c827(struct ice_hw *hw)
{
        struct ice_aqc_get_link_topo cmd = {};
        u8 node_part_number;
        u16 node_handle;
        int status;

        if (hw->mac_type != ICE_MAC_E810)
                return false;

        if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
                return true;

        cmd.addr.topo_params.node_type_ctx =
                FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
                FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
        cmd.addr.topo_params.index = 0;

        status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
                                         &node_handle);

        if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
                return false;

        if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
                return true;

        return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

        return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
                       struct ice_sq_cd *cd)
{
        struct ice_aqc_manage_mac_read_resp *resp;
        struct ice_aqc_manage_mac_read *cmd;
        struct ice_aq_desc desc;
        int status;
        u16 flags;
        u8 i;

        cmd = &desc.params.mac_read;

        if (buf_size < sizeof(*resp))
                return -EINVAL;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (status)
                return status;

        resp = buf;
        flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

        if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
                ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
                return -EIO;
        }

        /* A single port can report up to two (LAN and WoL) addresses */
        for (i = 0; i < cmd->num_addr; i++)
                if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
                        ether_addr_copy(hw->port_info->mac.lan_addr,
                                        resp[i].mac_addr);
                        ether_addr_copy(hw->port_info->mac.perm_addr,
                                        resp[i].mac_addr);
                        break;
                }

        return 0;
}
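
/* Illustrative sketch (not upstream code): callers provide a buffer large
 * enough for both addresses a port may report (LAN and WoL), exactly as
 * ice_init_hw() does later in this file:
 *
 *	u8 buf[2 * sizeof(struct ice_aqc_manage_mac_read_resp)];
 *
 *	err = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *	if (!err)
 *		... hw->port_info->mac.lan_addr now holds the LAN MAC ...
 */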

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
                    struct ice_aqc_get_phy_caps_data *pcaps,
                    struct ice_sq_cd *cd)
{
        struct ice_aqc_get_phy_caps *cmd;
        u16 pcaps_size = sizeof(*pcaps);
        struct ice_aq_desc desc;
        const char *prefix;
        struct ice_hw *hw;
        int status;

        cmd = &desc.params.get_phy;

        if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
                return -EINVAL;
        hw = pi->hw;

        if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
            !ice_fw_supports_report_dflt_cfg(hw))
                return -EINVAL;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

        if (qual_mods)
                cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

        cmd->param0 |= cpu_to_le16(report_mode);
        status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

        ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

        switch (report_mode) {
        case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
                prefix = "phy_caps_media";
                break;
        case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
                prefix = "phy_caps_no_media";
                break;
        case ICE_AQC_REPORT_ACTIVE_CFG:
                prefix = "phy_caps_active";
                break;
        case ICE_AQC_REPORT_DFLT_CFG:
                prefix = "phy_caps_default";
                break;
        default:
                prefix = "phy_caps_invalid";
        }

        ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
                          le64_to_cpu(pcaps->phy_type_high), prefix);

        ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
                  prefix, report_mode);
        ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
        ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
                  pcaps->low_power_ctrl_an);
        ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
                  pcaps->eee_cap);
        ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
                  pcaps->eeer_value);
        ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
                  pcaps->link_fec_options);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
                  prefix, pcaps->module_compliance_enforcement);
        ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
                  prefix, pcaps->extended_compliance_code);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
                  pcaps->module_type[0]);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
                  pcaps->module_type[1]);
        ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
                  pcaps->module_type[2]);

        if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
                pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
                pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
                memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
                       sizeof(pi->phy.link_info.module_type));
        }

        return status;
}
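
/* Illustrative sketch (not part of the driver): querying the active PHY
 * configuration. The caps buffer is heap-allocated because the response is
 * larger than a direct descriptor.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int err;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return -ENOMEM;
 *	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				  pcaps, NULL);
 *	if (!err)
 *		... pcaps->caps / pcaps->phy_type_low describe the active
 *		    configuration ...
 *	kfree(pcaps);
 */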

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
                            struct ice_sq_cd *cd)
{
        struct ice_aqc_get_link_topo *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.get_link_topo;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

        cmd->addr.topo_params.node_type_ctx =
                (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
                 ICE_AQC_LINK_TOPO_NODE_CTX_S);

        /* set node type */
        cmd->addr.topo_params.node_type_ctx |=
                (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

        return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
                        u8 *node_part_number, u16 *node_handle)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
        desc.params.get_link_topo = *cmd;

        if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
                return -EINTR;

        if (node_handle)
                *node_handle =
                        le16_to_cpu(desc.params.get_link_topo.addr.handle);
        if (node_part_number)
                *node_part_number = desc.params.get_link_topo.node_part_num;

        return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type: type of netlist node to look for
 * @ctx: context of the search
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Return:
 * * 0 if the node is found,
 * * -ENOENT if no handle was found,
 * * negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
                                 u8 node_part_number, u16 *node_handle)
{
        u8 idx;

        for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
                struct ice_aqc_get_link_topo cmd = {};
                u8 rec_node_part_number;
                int status;

                cmd.addr.topo_params.node_type_ctx =
                        FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
                        FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
                cmd.addr.topo_params.index = idx;

                status = ice_aq_get_netlist_node(hw, &cmd,
                                                 &rec_node_part_number,
                                                 node_handle);
                if (status)
                        return status;

                if (rec_node_part_number == node_part_number)
                        return 0;
        }

        return -ENOENT;
}
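
/* Illustrative sketch (not upstream code): locating a PHY node in the port
 * context of the netlist. The part number used here is the C827 constant
 * already referenced by ice_is_pf_c827() above; any other part number works
 * the same way.
 *
 *	u16 handle;
 *	int err;
 *
 *	err = ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
 *				    ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
 *				    ICE_AQC_GET_LINK_TOPO_NODE_NR_C827,
 *				    &handle);
 *	if (!err)
 *		... the node exists and `handle` identifies it ...
 *	else if (err == -ENOENT)
 *		... not found in the first ICE_MAX_NETLIST_SIZE entries ...
 */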

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
        /* Node type cage can be used to determine if cage is present. If AQC
         * returns error (ENOENT), then no cage present. If no cage present then
         * connection type is backplane or BASE-T.
         */
        return !ice_aq_get_link_topo_handle(pi,
                                            ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
                                            NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
        struct ice_link_status *hw_link_info;

        if (!pi)
                return ICE_MEDIA_UNKNOWN;

        hw_link_info = &pi->phy.link_info;
        if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
                /* If more than one media type is selected, report unknown */
                return ICE_MEDIA_UNKNOWN;

        if (hw_link_info->phy_type_low) {
                /* 1G SGMII is a special case where some DA cable PHYs
                 * may show this as an option when it really shouldn't
                 * be since SGMII is meant to be between a MAC and a PHY
                 * in a backplane. Try to detect this case and handle it
                 */
                if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
                    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
                     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
                     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
                     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
                        return ICE_MEDIA_DA;

                switch (hw_link_info->phy_type_low) {
                case ICE_PHY_TYPE_LOW_1000BASE_SX:
                case ICE_PHY_TYPE_LOW_1000BASE_LX:
                case ICE_PHY_TYPE_LOW_10GBASE_SR:
                case ICE_PHY_TYPE_LOW_10GBASE_LR:
                case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
                case ICE_PHY_TYPE_LOW_25GBASE_SR:
                case ICE_PHY_TYPE_LOW_25GBASE_LR:
                case ICE_PHY_TYPE_LOW_40GBASE_SR4:
                case ICE_PHY_TYPE_LOW_40GBASE_LR4:
                case ICE_PHY_TYPE_LOW_50GBASE_SR2:
                case ICE_PHY_TYPE_LOW_50GBASE_LR2:
                case ICE_PHY_TYPE_LOW_50GBASE_SR:
                case ICE_PHY_TYPE_LOW_50GBASE_FR:
                case ICE_PHY_TYPE_LOW_50GBASE_LR:
                case ICE_PHY_TYPE_LOW_100GBASE_SR4:
                case ICE_PHY_TYPE_LOW_100GBASE_LR4:
                case ICE_PHY_TYPE_LOW_100GBASE_SR2:
                case ICE_PHY_TYPE_LOW_100GBASE_DR:
                case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
                case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
                case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
                case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
                case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
                case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
                case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
                case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
                        return ICE_MEDIA_FIBER;
                case ICE_PHY_TYPE_LOW_100BASE_TX:
                case ICE_PHY_TYPE_LOW_1000BASE_T:
                case ICE_PHY_TYPE_LOW_2500BASE_T:
                case ICE_PHY_TYPE_LOW_5GBASE_T:
                case ICE_PHY_TYPE_LOW_10GBASE_T:
                case ICE_PHY_TYPE_LOW_25GBASE_T:
                        return ICE_MEDIA_BASET;
                case ICE_PHY_TYPE_LOW_10G_SFI_DA:
                case ICE_PHY_TYPE_LOW_25GBASE_CR:
                case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
                case ICE_PHY_TYPE_LOW_25GBASE_CR1:
                case ICE_PHY_TYPE_LOW_40GBASE_CR4:
                case ICE_PHY_TYPE_LOW_50GBASE_CR2:
                case ICE_PHY_TYPE_LOW_50GBASE_CP:
                case ICE_PHY_TYPE_LOW_100GBASE_CR4:
                case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
                case ICE_PHY_TYPE_LOW_100GBASE_CP2:
                        return ICE_MEDIA_DA;
                case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
                case ICE_PHY_TYPE_LOW_40G_XLAUI:
                case ICE_PHY_TYPE_LOW_50G_LAUI2:
                case ICE_PHY_TYPE_LOW_50G_AUI2:
                case ICE_PHY_TYPE_LOW_50G_AUI1:
                case ICE_PHY_TYPE_LOW_100G_AUI4:
                case ICE_PHY_TYPE_LOW_100G_CAUI4:
                        if (ice_is_media_cage_present(pi))
                                return ICE_MEDIA_DA;
                        fallthrough;
                case ICE_PHY_TYPE_LOW_1000BASE_KX:
                case ICE_PHY_TYPE_LOW_2500BASE_KX:
                case ICE_PHY_TYPE_LOW_2500BASE_X:
                case ICE_PHY_TYPE_LOW_5GBASE_KR:
                case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
                case ICE_PHY_TYPE_LOW_25GBASE_KR:
                case ICE_PHY_TYPE_LOW_25GBASE_KR1:
                case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
                case ICE_PHY_TYPE_LOW_40GBASE_KR4:
                case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
                case ICE_PHY_TYPE_LOW_50GBASE_KR2:
                case ICE_PHY_TYPE_LOW_100GBASE_KR4:
                case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
                        return ICE_MEDIA_BACKPLANE;
                }
        } else {
                switch (hw_link_info->phy_type_high) {
                case ICE_PHY_TYPE_HIGH_100G_AUI2:
                case ICE_PHY_TYPE_HIGH_100G_CAUI2:
                        if (ice_is_media_cage_present(pi))
                                return ICE_MEDIA_DA;
                        fallthrough;
                case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
                        return ICE_MEDIA_BACKPLANE;
                case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
                case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
                        return ICE_MEDIA_FIBER;
                }
        }
        return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for the newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
        switch (hw->mac_type) {
        case ICE_MAC_E830:
                return ICE_AQC_LS_DATA_SIZE_V2;
        case ICE_MAC_E810:
        default:
                return ICE_AQC_LS_DATA_SIZE_V1;
        }
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
                     struct ice_link_status *link, struct ice_sq_cd *cd)
{
        struct ice_aqc_get_link_status_data link_data = { 0 };
        struct ice_aqc_get_link_status *resp;
        struct ice_link_status *li_old, *li;
        enum ice_media_type *hw_media_type;
        struct ice_fc_info *hw_fc_info;
        bool tx_pause, rx_pause;
        struct ice_aq_desc desc;
        struct ice_hw *hw;
        u16 cmd_flags;
        int status;

        if (!pi)
                return -EINVAL;
        hw = pi->hw;
        li_old = &pi->phy.link_info_old;
        hw_media_type = &pi->phy.media_type;
        li = &pi->phy.link_info;
        hw_fc_info = &pi->fc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
        cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
        resp = &desc.params.get_link_status;
        resp->cmd_flags = cpu_to_le16(cmd_flags);
        resp->lport_num = pi->lport;

        status = ice_aq_send_cmd(hw, &desc, &link_data,
                                 ice_get_link_status_datalen(hw), cd);
        if (status)
                return status;

        /* save off old link status information */
        *li_old = *li;

        /* update current link status information */
        li->link_speed = le16_to_cpu(link_data.link_speed);
        li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
        li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
        *hw_media_type = ice_get_media_type(pi);
        li->link_info = link_data.link_info;
        li->link_cfg_err = link_data.link_cfg_err;
        li->an_info = link_data.an_info;
        li->ext_info = link_data.ext_info;
        li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
        li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
        li->topo_media_conflict = link_data.topo_media_conflict;
        li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
                                      ICE_AQ_CFG_PACING_TYPE_M);

        /* update fc info */
        tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
        rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
        if (tx_pause && rx_pause)
                hw_fc_info->current_mode = ICE_FC_FULL;
        else if (tx_pause)
                hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
        else if (rx_pause)
                hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
        else
                hw_fc_info->current_mode = ICE_FC_NONE;

        li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

        ice_debug(hw, ICE_DBG_LINK, "get link info\n");
        ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
        ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
                  (unsigned long long)li->phy_type_low);
        ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
                  (unsigned long long)li->phy_type_high);
        ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
        ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
        ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
        ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
        ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
        ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
        ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
        ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
                  li->max_frame_size);
        ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

        /* save link status information */
        if (link)
                *link = *li;

        /* flag cleared so calling functions don't call AQ again */
        pi->phy.get_link_info = false;

        return 0;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
                                struct ice_aqc_set_mac_cfg *cmd)
{
        u32 val, fc_thres_m;

        /* We read back the transmit timer and FC threshold value of
         * LFC. Thus, we will use index =
         * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
         *
         * Also, because we are operating on transmit timer and FC
         * threshold of LFC, we don't turn on any bit in tx_tmr_priority
         */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

        if (hw->mac_type == ICE_MAC_E830) {
                /* Retrieve the transmit timer */
                val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
                cmd->tx_tmr_value =
                        le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

                /* Retrieve the fc threshold */
                val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
                fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
        } else {
                /* Retrieve the transmit timer */
                val = rd32(hw,
                           E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
                cmd->tx_tmr_value =
                        le16_encode_bits(val,
                                         E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

                /* Retrieve the fc threshold */
                val = rd32(hw,
                           E800_REFRESH_TMR(E800_IDX_OF_LFC));
                fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
        }
        cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
        struct ice_aqc_set_mac_cfg *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.set_mac_cfg;

        if (max_frame_size == 0)
                return -EINVAL;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

        cmd->max_frame_size = cpu_to_le16(max_frame_size);

        ice_fill_tx_timer_and_fc_thresh(hw, cmd);

        return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
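
/* Illustrative example (not upstream code): enabling jumbo frame support at
 * the MAC level, as ice_init_hw() does later in this file:
 *
 *	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 *
 * The Tx pause timer and FC refresh threshold read back by
 * ice_fill_tx_timer_and_fc_thresh() ride along in the same descriptor.
 */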

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
        struct ice_switch_info *sw;
        int status;

        hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
                                       sizeof(*hw->switch_info), GFP_KERNEL);
        sw = hw->switch_info;

        if (!sw)
                return -ENOMEM;

        INIT_LIST_HEAD(&sw->vsi_list_map_head);
        sw->prof_res_bm_init = 0;

        /* Initialize recipe count with default recipes read from NVM */
        sw->recp_cnt = ICE_SW_LKUP_LAST;

        status = ice_init_def_sw_recp(hw);
        if (status) {
                devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
                return status;
        }
        return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
        struct ice_switch_info *sw = hw->switch_info;
        struct ice_vsi_list_map_info *v_pos_map;
        struct ice_vsi_list_map_info *v_tmp_map;
        struct ice_sw_recipe *recps;
        u8 i;

        list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
                                 list_entry) {
                list_del(&v_pos_map->list_entry);
                devm_kfree(ice_hw_to_dev(hw), v_pos_map);
        }
        recps = sw->recp_list;
        for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
                recps[i].root_rid = i;

                if (recps[i].adv_rule) {
                        struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
                        struct ice_adv_fltr_mgmt_list_entry *lst_itr;

                        mutex_destroy(&recps[i].filt_rule_lock);
                        list_for_each_entry_safe(lst_itr, tmp_entry,
                                                 &recps[i].filt_rules,
                                                 list_entry) {
                                list_del(&lst_itr->list_entry);
                                devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
                                devm_kfree(ice_hw_to_dev(hw), lst_itr);
                        }
                } else {
                        struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

                        mutex_destroy(&recps[i].filt_rule_lock);
                        list_for_each_entry_safe(lst_itr, tmp_entry,
                                                 &recps[i].filt_rules,
                                                 list_entry) {
                                list_del(&lst_itr->list_entry);
                                devm_kfree(ice_hw_to_dev(hw), lst_itr);
                        }
                }
        }
        ice_rm_all_sw_replay_rule_info(hw);
        devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
        devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
        u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
                                  rd32(hw, GL_PWR_MODE_CTL));

        switch (max_agg_bw) {
        case ICE_MAX_AGG_BW_200G:
        case ICE_MAX_AGG_BW_100G:
        case ICE_MAX_AGG_BW_50G:
                hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
                hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
                break;
        case ICE_MAX_AGG_BW_25G:
                hw->itr_gran = ICE_ITR_GRAN_MAX_25;
                hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
                break;
        }
}

/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 *
 * Return: 0 on success, -ETIMEDOUT on timeout.
 */
static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{
        int fw_loading;
        u32 elapsed = 0;

        while (elapsed <= timeout) {
                fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;

                /* firmware was not yet loaded, we have to wait more */
                if (fw_loading) {
                        elapsed += 100;
                        msleep(100);
                        continue;
                }
                return 0;
        }

        return -ETIMEDOUT;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
        struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
        void *mac_buf __free(kfree) = NULL;
        u16 mac_buf_len;
        int status;

        /* Set MAC type based on DeviceID */
        status = ice_set_mac_type(hw);
        if (status)
                return status;

        hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

        status = ice_reset(hw, ICE_RESET_PFR);
        if (status)
                return status;

        ice_get_itr_intrl_gran(hw);

        status = ice_create_all_ctrlq(hw);
        if (status)
                goto err_unroll_cqinit;

        status = ice_fwlog_init(hw);
        if (status)
                ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
                          status);

        status = ice_clear_pf_cfg(hw);
        if (status)
                goto err_unroll_cqinit;

        /* Set bit to enable Flow Director filters */
        wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
        INIT_LIST_HEAD(&hw->fdir_list_head);

        ice_clear_pxe_mode(hw);

        status = ice_init_nvm(hw);
        if (status)
                goto err_unroll_cqinit;

        status = ice_get_caps(hw);
        if (status)
                goto err_unroll_cqinit;

        if (!hw->port_info)
                hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
                                             sizeof(*hw->port_info),
                                             GFP_KERNEL);
        if (!hw->port_info) {
                status = -ENOMEM;
                goto err_unroll_cqinit;
        }

        hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
        /* set the back pointer to HW */
        hw->port_info->hw = hw;

        /* Initialize port_info struct with switch configuration data */
        status = ice_get_initial_sw_cfg(hw);
        if (status)
                goto err_unroll_alloc;

        hw->evb_veb = true;

        /* init xarray for identifying scheduling nodes uniquely */
        xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

        /* Query the allocated resources for Tx scheduler */
        status = ice_sched_query_res_alloc(hw);
        if (status) {
                ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
                goto err_unroll_alloc;
        }
        ice_sched_get_psm_clk_freq(hw);

        /* Initialize port_info struct with scheduler data */
        status = ice_sched_init_port(hw->port_info);
        if (status)
                goto err_unroll_sched;

        pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
        if (!pcaps) {
                status = -ENOMEM;
                goto err_unroll_sched;
        }

        /* Initialize port_info struct with PHY capabilities */
        status = ice_aq_get_phy_caps(hw->port_info, false,
                                     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
                                     NULL);
        if (status)
                dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
                         status);

        /* Initialize port_info struct with link information */
        status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
        if (status)
                goto err_unroll_sched;

        /* need a valid SW entry point to build a Tx tree */
        if (!hw->sw_entry_point_layer) {
                ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
                status = -EIO;
                goto err_unroll_sched;
        }
        INIT_LIST_HEAD(&hw->agg_list);
        /* Initialize max burst size */
        if (!hw->max_burst_size)
                ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

        status = ice_init_fltr_mgmt_struct(hw);
        if (status)
                goto err_unroll_sched;

        /* Get MAC information */
        /* A single port can report up to two (LAN and WoL) addresses */
        mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
                          GFP_KERNEL);
        if (!mac_buf) {
                status = -ENOMEM;
                goto err_unroll_fltr_mgmt_struct;
        }

        mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
        status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

        if (status)
                goto err_unroll_fltr_mgmt_struct;
        /* enable jumbo frame support at MAC level */
        status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
        if (status)
                goto err_unroll_fltr_mgmt_struct;
        /* Obtain counter base index which would be used by flow director */
        status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
        if (status)
                goto err_unroll_fltr_mgmt_struct;
        status = ice_init_hw_tbls(hw);
        if (status)
                goto err_unroll_fltr_mgmt_struct;
        mutex_init(&hw->tnl_lock);
        ice_init_chk_recipe_reuse_support(hw);

        /* Some cards require longer initialization times
         * due to necessity of loading FW from an external source.
         * This can take even half a minute.
         */
        if (ice_is_pf_c827(hw)) {
                status = ice_wait_for_fw(hw, 30000);
                if (status) {
                        dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out");
                        goto err_unroll_fltr_mgmt_struct;
                }
        }

        return 0;
err_unroll_fltr_mgmt_struct:
        ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
        ice_sched_cleanup_all(hw);
err_unroll_alloc:
        devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
        ice_destroy_all_ctrlq(hw);
        return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
        ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
        ice_cleanup_fltr_mgmt_struct(hw);

        ice_sched_cleanup_all(hw);
        ice_sched_clear_agg(hw);
        ice_free_seg(hw);
        ice_free_hw_tbls(hw);
        mutex_destroy(&hw->tnl_lock);

        ice_fwlog_deinit(hw);
        ice_destroy_all_ctrlq(hw);

        /* Clear VSI contexts if not already cleared */
        ice_clear_all_vsi_ctx(hw);
}
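
/* Illustrative pairing (not upstream code): how a probe/remove flow uses the
 * two entry points above. Error handling trimmed to the essentials.
 *
 *	err = ice_init_hw(hw);		(at probe time)
 *	if (err)
 *		return err;		(ice_init_hw unrolls itself on error)
 *	...
 *	ice_deinit_hw(hw);		(at remove time, nominal operation
 *					 only, per the kernel-doc above)
 */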

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
        u32 cnt, reg = 0, grst_timeout, uld_mask;

        /* Poll for Device Active state in case a recent CORER, GLOBR,
         * or EMPR has occurred. The grst delay value is in 100ms units.
         * Add 1sec for outstanding AQ commands that can take a long time.
         */
        grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
                                 rd32(hw, GLGEN_RSTCTL)) + 10;

        for (cnt = 0; cnt < grst_timeout; cnt++) {
                mdelay(100);
                reg = rd32(hw, GLGEN_RSTAT);
                if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
                        break;
        }

        if (cnt == grst_timeout) {
                ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
                return -EIO;
        }

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

        uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
                                          GLNVM_ULD_PE_DONE_M : 0);

        /* Device is Active; check Global Reset processes are done */
        for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
                reg = rd32(hw, GLNVM_ULD) & uld_mask;
                if (reg == uld_mask) {
                        ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
                        break;
                }
                mdelay(10);
        }

        if (cnt == ICE_PF_RESET_WAIT_COUNT) {
                ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
                          reg);
                return -EIO;
        }

        return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
        u32 cnt, reg;

        /* If at function entry a global reset was already in progress, i.e.
         * state is not 'device active' or any of the reset done bits are not
         * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
         * global reset is done.
         */
        if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
            (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
                /* poll on global reset currently in progress until done */
                if (ice_check_reset(hw))
                        return -EIO;

                return 0;
        }

        /* Reset the PF */
        reg = rd32(hw, PFGEN_CTRL);

        wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

        /* Wait for the PFR to complete. The wait time is the global config lock
         * timeout plus the PFR timeout which will account for a possible reset
         * that is occurring during a download package operation.
         */
        for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
             ICE_PF_RESET_WAIT_COUNT; cnt++) {
                reg = rd32(hw, PFGEN_CTRL);
                if (!(reg & PFGEN_CTRL_PFSWR_M))
                        break;

                mdelay(1);
        }

        /* The timeout check must match the loop bound above; comparing
         * against ICE_PF_RESET_WAIT_COUNT alone would misreport the result.
         */
        if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
                ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
                return -EIO;
        }

        return 0;
}
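
/* Worked example (informative only, value chosen for illustration): the
 * GRSTDEL field is in 100 ms units and ice_check_reset() adds 10 ticks (1 s)
 * for outstanding AQ commands, so a GRSTDEL of 35 gives
 *
 *	grst_timeout = 35 + 10 = 45 polls  ->  45 * 100 ms = 4.5 s
 *
 * before global reset polling is declared failed.
 */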

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
        u32 val = 0;

        switch (req) {
        case ICE_RESET_PFR:
                return ice_pf_reset(hw);
        case ICE_RESET_CORER:
                ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
                val = GLGEN_RTRIG_CORER_M;
                break;
        case ICE_RESET_GLOBR:
                ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
                val = GLGEN_RTRIG_GLOBR_M;
                break;
        default:
                return -EINVAL;
        }

        val |= rd32(hw, GLGEN_RTRIG);
        wr32(hw, GLGEN_RTRIG, val);
        ice_flush(hw);

        /* wait for the FW to be ready */
        return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
                                   const ice_rxq_ctx_buf_t *rxq_ctx,
                                   u32 rxq_index)
{
        /* Copy each dword separately to HW */
        for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
                u32 ctx = ((const u32 *)rxq_ctx)[i];

                wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);

                ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
        }
}

#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
	PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)

/* LAN Rx Queue Context */
static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
                                 /* Field	Width	LSB */
        ICE_CTX_STORE(ice_rlan_ctx, head,	13,	0),
        ICE_CTX_STORE(ice_rlan_ctx, cpuid,	8,	13),
        ICE_CTX_STORE(ice_rlan_ctx, base,	57,	32),
        ICE_CTX_STORE(ice_rlan_ctx, qlen,	13,	89),
        ICE_CTX_STORE(ice_rlan_ctx, dbuf,	7,	102),
        ICE_CTX_STORE(ice_rlan_ctx, hbuf,	5,	109),
        ICE_CTX_STORE(ice_rlan_ctx, dtype,	2,	114),
        ICE_CTX_STORE(ice_rlan_ctx, dsize,	1,	116),
        ICE_CTX_STORE(ice_rlan_ctx, crcstrip,	1,	117),
        ICE_CTX_STORE(ice_rlan_ctx, l2tsel,	1,	119),
        ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,	4,	120),
        ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,	2,	124),
        ICE_CTX_STORE(ice_rlan_ctx, showiv,	1,	127),
        ICE_CTX_STORE(ice_rlan_ctx, rxmax,	14,	174),
        ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
        ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
        ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
        ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
        ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,	3,	198),
        ICE_CTX_STORE(ice_rlan_ctx, prefena,	1,	201),
};

/**
 * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
 * @ctx: the Rx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
                             ice_rxq_ctx_buf_t *buf)
{
        pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
                    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
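
/* Informative example (not upstream text): ICE_CTX_STORE(ice_rlan_ctx, qlen,
 * 13, 89) expands to PACKED_FIELD(101, 89, struct ice_rlan_ctx, qlen), i.e.
 * the 13-bit qlen value occupies bits 89..101 (89 + 13 - 1 = 101) of the
 * packed buffer. pack_fields() walks these descriptors and, with the
 * little-endian and LSW32-first quirks, scatters each CPU-native field into
 * its bit window.
 */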

/**
 * ice_write_rxq_ctx - Write Rx Queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the unpacked Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Pack the sparse Rx Queue context into dense hardware format and write it
 * into the HW register space.
 *
 * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
                      u32 rxq_index)
{
        ice_rxq_ctx_buf_t buf = {};

        if (rxq_index > QRX_CTRL_MAX_INDEX)
                return -EINVAL;

        ice_pack_rxq_ctx(rlan_ctx, &buf);
        ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);

        return 0;
}
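
/* Illustrative sketch (not upstream code): a minimal Rx queue context write.
 * The field values are placeholders; real values come from the VSI/ring
 * setup code, and the 128-byte (>> 7) granularity of `base` and `dbuf` is an
 * assumption carried over from that code.
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 *	if (err)
 *		... rxq_index exceeded QRX_CTRL_MAX_INDEX ...
 */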

/* LAN Tx Queue Context */
static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
                                 /* Field	Width	LSB */
        ICE_CTX_STORE(ice_tlan_ctx, base,	57,	0),
        ICE_CTX_STORE(ice_tlan_ctx, port_num,	3,	57),
        ICE_CTX_STORE(ice_tlan_ctx, cgd_num,	5,	60),
        ICE_CTX_STORE(ice_tlan_ctx, pf_num,	3,	65),
        ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,	10,	68),
        ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,	2,	78),
        ICE_CTX_STORE(ice_tlan_ctx, src_vsi,	10,	80),
        ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,	1,	90),
        ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
        ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,	1,	92),
        ICE_CTX_STORE(ice_tlan_ctx, cpuid,	8,	93),
        ICE_CTX_STORE(ice_tlan_ctx, wb_mode,	1,	101),
        ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,	1,	102),
        ICE_CTX_STORE(ice_tlan_ctx, tphrd,	1,	103),
        ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,	1,	104),
        ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,	9,	105),
        ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,	14,	114),
        ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
        ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,	6,	129),
        ICE_CTX_STORE(ice_tlan_ctx, qlen,	13,	135),
        ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,	4,	148),
        ICE_CTX_STORE(ice_tlan_ctx, tso_ena,	1,	152),
        ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,	11,	153),
        ICE_CTX_STORE(ice_tlan_ctx, legacy_int,	1,	164),
        ICE_CTX_STORE(ice_tlan_ctx, drop_ena,	1,	165),
        ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,	2,	166),
        ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
};

/**
 * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
 * @ctx: the Tx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
        pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
                    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
                 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
        return ice_sq_send_cmd(hw, ice_get_sbq(hw),
                               (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 * @flags: control queue descriptor flags
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
        struct ice_sbq_cmd_desc desc = {0};
        struct ice_sbq_msg_req msg = {0};
        u16 msg_len;
        int status;

        msg_len = sizeof(msg);

        msg.dest_dev = in->dest_dev;
        msg.opcode = in->opcode;
        msg.flags = ICE_SBQ_MSG_FLAGS;
        msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
        msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
        msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

        if (in->opcode)
                msg.data = cpu_to_le32(in->data);
        else
                /* data read comes back in completion, so shorten the struct by
                 * sizeof(msg.data)
                 */
                msg_len -= sizeof(msg.data);

        desc.flags = cpu_to_le16(flags);
        desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
        desc.param0.cmd_len = cpu_to_le16(msg_len);
        status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
        if (!status && !in->opcode)
                in->data = le32_to_cpu
                        (((struct ice_sbq_msg_cmpl *)&msg)->data);
        return status;
}
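
/* Illustrative sketch (not upstream code; assumes, per the "if (in->opcode)"
 * test above, that a zero opcode means a register read): reading one
 * register from a sideband device.
 *
 *	struct ice_sbq_msg_input msg = {
 *		.dest_dev = ...,	(target sideband device)
 *		.opcode = 0,		(read; non-zero sends msg.data instead)
 *		.msg_addr_low = ...,
 *		.msg_addr_high = ...,
 *	};
 *
 *	err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
 *	if (!err)
 *		... msg.data holds the value returned in the completion ...
 */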

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
        switch (opcode) {
        case ice_aqc_opc_get_link_topo:
        case ice_aqc_opc_lldp_stop:
        case ice_aqc_opc_lldp_start:
        case ice_aqc_opc_lldp_filter_ctrl:
                return true;
        }

        return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                      struct ice_aq_desc *desc, void *buf, u16 buf_size,
                      struct ice_sq_cd *cd)
{
        struct ice_aq_desc desc_cpy;
        bool is_cmd_for_retry;
        u8 idx = 0;
        u16 opcode;
        int status;

        opcode = le16_to_cpu(desc->opcode);
        is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
        memset(&desc_cpy, 0, sizeof(desc_cpy));

        if (is_cmd_for_retry) {
                /* All retryable cmds are direct, without buf. */
                WARN_ON(buf);

                memcpy(&desc_cpy, desc, sizeof(desc_cpy));
        }

        do {
                status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

                if (!is_cmd_for_retry || !status ||
                    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
                        break;

                memcpy(desc, &desc_cpy, sizeof(desc_cpy));

                msleep(ICE_SQ_SEND_DELAY_TIME_MS);

        } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

        return status;
}
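
/* Informative note (not upstream text): only the opcodes listed in
 * ice_should_retry_sq_send_cmd() are retried, and only while FW keeps
 * answering ICE_AQ_RC_EBUSY. Conceptually:
 *
 *	do {
 *		send descriptor;
 *	} while (retryable opcode && FW said EBUSY &&
 *		 fewer than ICE_SQ_SEND_MAX_EXECUTE attempts,
 *		 sleeping ICE_SQ_SEND_DELAY_TIME_MS between attempts);
 *
 * The descriptor is restored from desc_cpy before each retry because the FW
 * writes its completion back into the original descriptor.
 */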

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
                u16 buf_size, struct ice_sq_cd *cd)
{
        struct ice_aqc_req_res *cmd = &desc->params.res_owner;
        bool lock_acquired = false;
        int status;

        /* When a package download is in process (i.e. when the firmware's
         * Global Configuration Lock resource is held), only the Download
         * Package, Get Version, Get Package Info List, Upload Section,
         * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
         * Set/Get Tx Topology, Add Recipe, Set Recipes to Profile
         * Association, Get Recipe, Get Recipes to Profile Association, and
         * Release Resource (with resource ID set to Global Config Lock)
         * AdminQ commands are allowed; all others must block until the
         * package download completes and the Global Config Lock is released.
         * See also ice_acquire_global_cfg_lock().
         */
        switch (le16_to_cpu(desc->opcode)) {
        case ice_aqc_opc_download_pkg:
        case ice_aqc_opc_get_pkg_info_list:
        case ice_aqc_opc_get_ver:
        case ice_aqc_opc_upload_section:
        case ice_aqc_opc_update_pkg:
        case ice_aqc_opc_set_port_params:
        case ice_aqc_opc_get_vlan_mode_parameters:
        case ice_aqc_opc_set_vlan_mode_parameters:
        case ice_aqc_opc_set_tx_topo:
        case ice_aqc_opc_get_tx_topo:
        case ice_aqc_opc_add_recipe:
        case ice_aqc_opc_recipe_to_profile:
        case ice_aqc_opc_get_recipe:
        case ice_aqc_opc_get_recipe_to_profile:
                break;
        case ice_aqc_opc_release_res:
                if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
                        break;
                fallthrough;
        default:
                mutex_lock(&ice_global_cfg_lock_sw);
                lock_acquired = true;
                break;
        }

        status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
        if (lock_acquired)
                mutex_unlock(&ice_global_cfg_lock_sw);

        return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
        struct ice_aqc_get_ver *resp;
        struct ice_aq_desc desc;
        int status;

        resp = &desc.params.get_ver;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

        status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

        if (!status) {
                hw->fw_branch = resp->fw_branch;
                hw->fw_maj_ver = resp->fw_major;
                hw->fw_min_ver = resp->fw_minor;
                hw->fw_patch = resp->fw_patch;
                hw->fw_build = le32_to_cpu(resp->fw_build);
                hw->api_branch = resp->api_branch;
                hw->api_maj_ver = resp->api_major;
                hw->api_min_ver = resp->api_minor;
                hw->api_patch = resp->api_patch;
        }

        return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
                       struct ice_sq_cd *cd)
{
        struct ice_aqc_driver_ver *cmd;
        struct ice_aq_desc desc;
        u16 len;

        cmd = &desc.params.driver_ver;

        if (!dv)
                return -EINVAL;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
        cmd->major_ver = dv->major_ver;
        cmd->minor_ver = dv->minor_ver;
        cmd->build_ver = dv->build_ver;
        cmd->subbuild_ver = dv->subbuild_ver;

        len = 0;
        while (len < sizeof(dv->driver_string) &&
               isascii(dv->driver_string[len]) && dv->driver_string[len])
                len++;

        return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
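
/* Illustrative sketch (not upstream code; field names taken from the
 * assignments above, exact types per ice_type.h): reporting the driver
 * version at load time. Only the ASCII bytes up to the NUL terminator of
 * driver_string are sent as the indirect buffer.
 *
 *	struct ice_driver_ver dv = {
 *		.major_ver = 1,
 *		.minor_ver = 0,
 *	};
 *
 *	strscpy(dv.driver_string, "example", sizeof(dv.driver_string));
 *	err = ice_aq_send_driver_ver(hw, &dv, NULL);
 */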
1846 */ 1847 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1848 { 1849 struct ice_aqc_q_shutdown *cmd; 1850 struct ice_aq_desc desc; 1851 1852 cmd = &desc.params.q_shutdown; 1853 1854 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1855 1856 if (unloading) 1857 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1858 1859 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1860 } 1861 1862 /** 1863 * ice_aq_req_res 1864 * @hw: pointer to the HW struct 1865 * @res: resource ID 1866 * @access: access type 1867 * @sdp_number: resource number 1868 * @timeout: the maximum time in ms that the driver may hold the resource 1869 * @cd: pointer to command details structure or NULL 1870 * 1871 * Requests common resource using the admin queue commands (0x0008). 1872 * When attempting to acquire the Global Config Lock, the driver can 1873 * learn of three states: 1874 * 1) 0 - acquired lock, and can perform download package 1875 * 2) -EIO - did not get lock, driver should fail to load 1876 * 3) -EALREADY - did not get lock, but another driver has 1877 * successfully downloaded the package; the driver does 1878 * not have to download the package and can continue 1879 * loading 1880 * 1881 * Note that if the caller is in an acquire lock, perform action, release lock 1882 * phase of operation, it is possible that the FW may detect a timeout and issue 1883 * a CORER. In this case, the driver will receive a CORER interrupt and will 1884 * have to determine its cause. The calling thread that is handling this flow 1885 * will likely get an error propagated back to it indicating the Download 1886 * Package, Update Package or the Release Resource AQ commands timed out. 1887 */ 1888 static int 1889 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1890 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1891 struct ice_sq_cd *cd) 1892 { 1893 struct ice_aqc_req_res *cmd_resp; 1894 struct ice_aq_desc desc; 1895 int status; 1896 1897 cmd_resp = &desc.params.res_owner; 1898 1899 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1900 1901 cmd_resp->res_id = cpu_to_le16(res); 1902 cmd_resp->access_type = cpu_to_le16(access); 1903 cmd_resp->res_number = cpu_to_le32(sdp_number); 1904 cmd_resp->timeout = cpu_to_le32(*timeout); 1905 *timeout = 0; 1906 1907 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1908 1909 /* The completion specifies the maximum time in ms that the driver 1910 * may hold the resource in the Timeout field. 1911 */ 1912 1913 /* Global config lock response utilizes an additional status field. 1914 * 1915 * If the Global config lock resource is held by some other driver, the 1916 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1917 * and the timeout field indicates the maximum time the current owner 1918 * of the resource has to free it. 
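 *
 * If another driver has already acquired the lock and finished its package
 * download, the status field instead reports ICE_AQ_RES_GLBL_DONE, which
 * this function surfaces to the caller as -EALREADY.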
1919 */ 1920 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1921 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1922 *timeout = le32_to_cpu(cmd_resp->timeout); 1923 return 0; 1924 } else if (le16_to_cpu(cmd_resp->status) == 1925 ICE_AQ_RES_GLBL_IN_PROG) { 1926 *timeout = le32_to_cpu(cmd_resp->timeout); 1927 return -EIO; 1928 } else if (le16_to_cpu(cmd_resp->status) == 1929 ICE_AQ_RES_GLBL_DONE) { 1930 return -EALREADY; 1931 } 1932 1933 /* invalid FW response, force a timeout immediately */ 1934 *timeout = 0; 1935 return -EIO; 1936 } 1937 1938 /* If the resource is held by some other driver, the command completes 1939 * with a busy return value and the timeout field indicates the maximum 1940 * time the current owner of the resource has to free it. 1941 */ 1942 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1943 *timeout = le32_to_cpu(cmd_resp->timeout); 1944 1945 return status; 1946 } 1947 1948 /** 1949 * ice_aq_release_res 1950 * @hw: pointer to the HW struct 1951 * @res: resource ID 1952 * @sdp_number: resource number 1953 * @cd: pointer to command details structure or NULL 1954 * 1955 * release common resource using the admin queue commands (0x0009) 1956 */ 1957 static int 1958 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1959 struct ice_sq_cd *cd) 1960 { 1961 struct ice_aqc_req_res *cmd; 1962 struct ice_aq_desc desc; 1963 1964 cmd = &desc.params.res_owner; 1965 1966 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1967 1968 cmd->res_id = cpu_to_le16(res); 1969 cmd->res_number = cpu_to_le32(sdp_number); 1970 1971 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1972 } 1973 1974 /** 1975 * ice_acquire_res 1976 * @hw: pointer to the HW structure 1977 * @res: resource ID 1978 * @access: access type (read or write) 1979 * @timeout: timeout in milliseconds 1980 * 1981 * This function will attempt to acquire the ownership of a resource. 1982 */ 1983 int 1984 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1985 enum ice_aq_res_access_type access, u32 timeout) 1986 { 1987 #define ICE_RES_POLLING_DELAY_MS 10 1988 u32 delay = ICE_RES_POLLING_DELAY_MS; 1989 u32 time_left = timeout; 1990 int status; 1991 1992 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1993 1994 /* A return code of -EALREADY means that another driver has 1995 * previously acquired the resource and performed any necessary updates; 1996 * in this case the caller does not obtain the resource and has no 1997 * further work to do. 1998 */ 1999 if (status == -EALREADY) 2000 goto ice_acquire_res_exit; 2001 2002 if (status) 2003 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 2004 2005 /* If necessary, poll until the current lock owner times out */ 2006 timeout = time_left; 2007 while (status && timeout && time_left) { 2008 mdelay(delay); 2009 timeout = (timeout > delay) ?
timeout - delay : 0; 2010 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2011 2012 if (status == -EALREADY) 2013 /* lock free, but no work to do */ 2014 break; 2015 2016 if (!status) 2017 /* lock acquired */ 2018 break; 2019 } 2020 if (status && status != -EALREADY) 2021 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 2022 2023 ice_acquire_res_exit: 2024 if (status == -EALREADY) { 2025 if (access == ICE_RES_WRITE) 2026 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 2027 else 2028 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 2029 } 2030 return status; 2031 } 2032 2033 /** 2034 * ice_release_res 2035 * @hw: pointer to the HW structure 2036 * @res: resource ID 2037 * 2038 * This function will release a resource using the proper Admin Command. 2039 */ 2040 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2041 { 2042 unsigned long timeout; 2043 int status; 2044 2045 /* there are some rare cases when trying to release the resource 2046 * results in an admin queue timeout, so handle them correctly 2047 */ 2048 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 2049 do { 2050 status = ice_aq_release_res(hw, res, 0, NULL); 2051 if (status != -EIO) 2052 break; 2053 usleep_range(1000, 2000); 2054 } while (time_before(jiffies, timeout)); 2055 } 2056 2057 /** 2058 * ice_aq_alloc_free_res - command to allocate/free resources 2059 * @hw: pointer to the HW struct 2060 * @buf: Indirect buffer to hold data parameters and response 2061 * @buf_size: size of buffer for indirect commands 2062 * @opc: pass in the command opcode 2063 * 2064 * Helper function to allocate/free resources using the admin queue commands 2065 */ 2066 int ice_aq_alloc_free_res(struct ice_hw *hw, 2067 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2068 enum ice_adminq_opc opc) 2069 { 2070 struct ice_aqc_alloc_free_res_cmd *cmd; 2071 struct ice_aq_desc desc; 2072 2073 cmd = &desc.params.sw_res_ctrl; 2074 2075 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 2076 return -EINVAL; 2077 2078 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2079 2080 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2081 2082 cmd->num_entries = cpu_to_le16(1); 2083 2084 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 2085 } 2086 2087 /** 2088 * ice_alloc_hw_res - allocate resource 2089 * @hw: pointer to the HW struct 2090 * @type: type of resource 2091 * @num: number of resources to allocate 2092 * @btm: allocate from bottom 2093 * @res: pointer to array that will receive the resources 2094 */ 2095 int 2096 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2097 { 2098 struct ice_aqc_alloc_free_res_elem *buf; 2099 u16 buf_len; 2100 int status; 2101 2102 buf_len = struct_size(buf, elem, num); 2103 buf = kzalloc(buf_len, GFP_KERNEL); 2104 if (!buf) 2105 return -ENOMEM; 2106 2107 /* Prepare buffer to allocate resource. 
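 * A dedicated (unshared) allocation is requested and the index field is
 * ignored, so firmware chooses which resources to hand back.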
*/ 2108 buf->num_elems = cpu_to_le16(num); 2109 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2110 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2111 if (btm) 2112 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2113 2114 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2115 if (status) 2116 goto ice_alloc_res_exit; 2117 2118 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2119 2120 ice_alloc_res_exit: 2121 kfree(buf); 2122 return status; 2123 } 2124 2125 /** 2126 * ice_free_hw_res - free allocated HW resource 2127 * @hw: pointer to the HW struct 2128 * @type: type of resource to free 2129 * @num: number of resources 2130 * @res: pointer to array that contains the resources to free 2131 */ 2132 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2133 { 2134 struct ice_aqc_alloc_free_res_elem *buf; 2135 u16 buf_len; 2136 int status; 2137 2138 buf_len = struct_size(buf, elem, num); 2139 buf = kzalloc(buf_len, GFP_KERNEL); 2140 if (!buf) 2141 return -ENOMEM; 2142 2143 /* Prepare buffer to free resource. */ 2144 buf->num_elems = cpu_to_le16(num); 2145 buf->res_type = cpu_to_le16(type); 2146 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2147 2148 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2149 if (status) 2150 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2151 2152 kfree(buf); 2153 return status; 2154 } 2155 2156 /** 2157 * ice_get_num_per_func - determine number of resources per PF 2158 * @hw: pointer to the HW structure 2159 * @max: value to be evenly split between each PF 2160 * 2161 * Determine the number of valid functions by going through the bitmap returned 2162 * from parsing capabilities and use this to calculate the number of resources 2163 * per PF based on the max value passed in. 2164 */ 2165 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2166 { 2167 u8 funcs; 2168 2169 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2170 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2171 ICE_CAPS_VALID_FUNCS_M); 2172 2173 if (!funcs) 2174 return 0; 2175 2176 return max / funcs; 2177 } 2178 2179 /** 2180 * ice_parse_common_caps - parse common device/function capabilities 2181 * @hw: pointer to the HW struct 2182 * @caps: pointer to common capabilities structure 2183 * @elem: the capability element to parse 2184 * @prefix: message prefix for tracing capabilities 2185 * 2186 * Given a capability element, extract relevant details into the common 2187 * capability structure. 2188 * 2189 * Returns: true if the capability matches one of the common capability ids, 2190 * false otherwise. 
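 * Capability IDs that are not recognized here may still be handled by the
 * function- and device-specific parsers, ice_parse_func_caps() and
 * ice_parse_dev_caps().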
2191 */ 2192 static bool 2193 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2194 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2195 { 2196 u32 logical_id = le32_to_cpu(elem->logical_id); 2197 u32 phys_id = le32_to_cpu(elem->phys_id); 2198 u32 number = le32_to_cpu(elem->number); 2199 u16 cap = le16_to_cpu(elem->cap); 2200 bool found = true; 2201 2202 switch (cap) { 2203 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2204 caps->valid_functions = number; 2205 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2206 caps->valid_functions); 2207 break; 2208 case ICE_AQC_CAPS_SRIOV: 2209 caps->sr_iov_1_1 = (number == 1); 2210 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2211 caps->sr_iov_1_1); 2212 break; 2213 case ICE_AQC_CAPS_DCB: 2214 caps->dcb = (number == 1); 2215 caps->active_tc_bitmap = logical_id; 2216 caps->maxtc = phys_id; 2217 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2218 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2219 caps->active_tc_bitmap); 2220 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2221 break; 2222 case ICE_AQC_CAPS_RSS: 2223 caps->rss_table_size = number; 2224 caps->rss_table_entry_width = logical_id; 2225 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2226 caps->rss_table_size); 2227 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2228 caps->rss_table_entry_width); 2229 break; 2230 case ICE_AQC_CAPS_RXQS: 2231 caps->num_rxq = number; 2232 caps->rxq_first_id = phys_id; 2233 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2234 caps->num_rxq); 2235 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2236 caps->rxq_first_id); 2237 break; 2238 case ICE_AQC_CAPS_TXQS: 2239 caps->num_txq = number; 2240 caps->txq_first_id = phys_id; 2241 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2242 caps->num_txq); 2243 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2244 caps->txq_first_id); 2245 break; 2246 case ICE_AQC_CAPS_MSIX: 2247 caps->num_msix_vectors = number; 2248 caps->msix_vector_first_id = phys_id; 2249 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2250 caps->num_msix_vectors); 2251 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2252 caps->msix_vector_first_id); 2253 break; 2254 case ICE_AQC_CAPS_PENDING_NVM_VER: 2255 caps->nvm_update_pending_nvm = true; 2256 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2257 break; 2258 case ICE_AQC_CAPS_PENDING_OROM_VER: 2259 caps->nvm_update_pending_orom = true; 2260 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2261 break; 2262 case ICE_AQC_CAPS_PENDING_NET_VER: 2263 caps->nvm_update_pending_netlist = true; 2264 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2265 break; 2266 case ICE_AQC_CAPS_NVM_MGMT: 2267 caps->nvm_unified_update = 2268 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2269 true : false; 2270 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2271 caps->nvm_unified_update); 2272 break; 2273 case ICE_AQC_CAPS_RDMA: 2274 caps->rdma = (number == 1); 2275 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2276 break; 2277 case ICE_AQC_CAPS_MAX_MTU: 2278 caps->max_mtu = number; 2279 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2280 prefix, caps->max_mtu); 2281 break; 2282 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2283 caps->pcie_reset_avoidance = (number > 0); 2284 ice_debug(hw, ICE_DBG_INIT, 2285 "%s: pcie_reset_avoidance = %d\n", prefix, 2286 caps->pcie_reset_avoidance); 2287 break; 2288 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2289 caps->reset_restrict_support = (number == 1); 2290 ice_debug(hw, ICE_DBG_INIT, 2291 "%s: reset_restrict_support = %d\n", prefix, 2292 caps->reset_restrict_support); 2293 break; 2294 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2295 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2296 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2297 prefix, caps->roce_lag); 2298 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2299 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2300 prefix, caps->sriov_lag); 2301 break; 2302 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2303 caps->tx_sched_topo_comp_mode_en = (number == 1); 2304 break; 2305 default: 2306 /* Not one of the recognized common capabilities */ 2307 found = false; 2308 } 2309 2310 return found; 2311 } 2312 2313 /** 2314 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2315 * @hw: pointer to the HW structure 2316 * @caps: pointer to capabilities structure to fix 2317 * 2318 * Re-calculate the capabilities that are dependent on the number of physical 2319 * ports; i.e. some features are not supported or function differently on 2320 * devices with more than 4 ports. 2321 */ 2322 static void 2323 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2324 { 2325 /* This assumes device capabilities are always scanned before function 2326 * capabilities during the initialization flow. 2327 */ 2328 if (hw->dev_caps.num_funcs > 4) { 2329 /* Max 4 TCs per port */ 2330 caps->maxtc = 4; 2331 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2332 caps->maxtc); 2333 if (caps->rdma) { 2334 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2335 caps->rdma = 0; 2336 } 2337 2338 /* print message only when processing device capabilities 2339 * during initialization. 2340 */ 2341 if (caps == &hw->dev_caps.common_cap) 2342 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2343 } 2344 } 2345 2346 /** 2347 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2348 * @hw: pointer to the HW struct 2349 * @func_p: pointer to function capabilities structure 2350 * @cap: pointer to the capability element to parse 2351 * 2352 * Extract function capabilities for ICE_AQC_CAPS_VF. 
2353 */ 2354 static void 2355 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2356 struct ice_aqc_list_caps_elem *cap) 2357 { 2358 u32 logical_id = le32_to_cpu(cap->logical_id); 2359 u32 number = le32_to_cpu(cap->number); 2360 2361 func_p->num_allocd_vfs = number; 2362 func_p->vf_base_id = logical_id; 2363 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2364 func_p->num_allocd_vfs); 2365 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2366 func_p->vf_base_id); 2367 } 2368 2369 /** 2370 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2371 * @hw: pointer to the HW struct 2372 * @func_p: pointer to function capabilities structure 2373 * @cap: pointer to the capability element to parse 2374 * 2375 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2376 */ 2377 static void 2378 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2379 struct ice_aqc_list_caps_elem *cap) 2380 { 2381 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2382 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2383 le32_to_cpu(cap->number)); 2384 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2385 func_p->guar_num_vsi); 2386 } 2387 2388 /** 2389 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2390 * @hw: pointer to the HW struct 2391 * @func_p: pointer to function capabilities structure 2392 * @cap: pointer to the capability element to parse 2393 * 2394 * Extract function capabilities for ICE_AQC_CAPS_1588. 2395 */ 2396 static void 2397 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2398 struct ice_aqc_list_caps_elem *cap) 2399 { 2400 struct ice_ts_func_info *info = &func_p->ts_func_info; 2401 u32 number = le32_to_cpu(cap->number); 2402 2403 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2404 func_p->common_cap.ieee_1588 = info->ena; 2405 2406 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2407 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2408 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2409 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2410 2411 if (!ice_is_e825c(hw)) { 2412 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2413 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2414 } else { 2415 info->clk_freq = ICE_TIME_REF_FREQ_156_250; 2416 info->clk_src = ICE_CLK_SRC_TCXO; 2417 } 2418 2419 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2420 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2421 } else { 2422 /* Unknown clock frequency, so assume a (probably incorrect) 2423 * default to avoid out-of-bounds look ups of frequency 2424 * related information. 
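 * ICE_TIME_REF_FREQ_25_000 serves as that fallback below.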
2425 */ 2426 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2427 info->clk_freq); 2428 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2429 } 2430 2431 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2432 func_p->common_cap.ieee_1588); 2433 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2434 info->src_tmr_owned); 2435 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2436 info->tmr_ena); 2437 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2438 info->tmr_index_owned); 2439 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2440 info->tmr_index_assoc); 2441 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2442 info->clk_freq); 2443 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2444 info->clk_src); 2445 } 2446 2447 /** 2448 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2449 * @hw: pointer to the HW struct 2450 * @func_p: pointer to function capabilities structure 2451 * 2452 * Extract function capabilities for ICE_AQC_CAPS_FD. 2453 */ 2454 static void 2455 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2456 { 2457 u32 reg_val, gsize, bsize; 2458 2459 reg_val = rd32(hw, GLQF_FD_SIZE); 2460 switch (hw->mac_type) { 2461 case ICE_MAC_E830: 2462 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2463 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2464 break; 2465 case ICE_MAC_E810: 2466 default: 2467 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2468 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2469 } 2470 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2471 func_p->fd_fltr_best_effort = bsize; 2472 2473 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2474 func_p->fd_fltr_guar); 2475 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2476 func_p->fd_fltr_best_effort); 2477 } 2478 2479 /** 2480 * ice_parse_func_caps - Parse function capabilities 2481 * @hw: pointer to the HW struct 2482 * @func_p: pointer to function capabilities structure 2483 * @buf: buffer containing the function capability records 2484 * @cap_count: the number of capabilities 2485 * 2486 * Helper function to parse function (0x000A) capabilities list. For 2487 * capabilities shared between device and function, this relies on 2488 * ice_parse_common_caps. 2489 * 2490 * Loop through the list of provided capabilities and extract the relevant 2491 * data into the function capabilities structure.
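 *
 * ice_recalc_port_limited_caps() runs at the end, so values parsed here may
 * still be adjusted based on the number of ports.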
2492 */ 2493 static void 2494 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2495 void *buf, u32 cap_count) 2496 { 2497 struct ice_aqc_list_caps_elem *cap_resp; 2498 u32 i; 2499 2500 cap_resp = buf; 2501 2502 memset(func_p, 0, sizeof(*func_p)); 2503 2504 for (i = 0; i < cap_count; i++) { 2505 u16 cap = le16_to_cpu(cap_resp[i].cap); 2506 bool found; 2507 2508 found = ice_parse_common_caps(hw, &func_p->common_cap, 2509 &cap_resp[i], "func caps"); 2510 2511 switch (cap) { 2512 case ICE_AQC_CAPS_VF: 2513 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2514 break; 2515 case ICE_AQC_CAPS_VSI: 2516 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2517 break; 2518 case ICE_AQC_CAPS_1588: 2519 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2520 break; 2521 case ICE_AQC_CAPS_FD: 2522 ice_parse_fdir_func_caps(hw, func_p); 2523 break; 2524 default: 2525 /* Don't list common capabilities as unknown */ 2526 if (!found) 2527 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2528 i, cap); 2529 break; 2530 } 2531 } 2532 2533 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2534 } 2535 2536 /** 2537 * ice_func_id_to_logical_id - map from function id to logical pf id 2538 * @active_function_bitmap: active function bitmap 2539 * @pf_id: function number of device 2540 * 2541 * Return: logical PF ID. 2542 */ 2543 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) 2544 { 2545 u8 logical_id = 0; 2546 u8 i; 2547 2548 for (i = 0; i < pf_id; i++) 2549 if (active_function_bitmap & BIT(i)) 2550 logical_id++; 2551 2552 return logical_id; 2553 } 2554 2555 /** 2556 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2557 * @hw: pointer to the HW struct 2558 * @dev_p: pointer to device capabilities structure 2559 * @cap: capability element to parse 2560 * 2561 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2562 */ 2563 static void 2564 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2565 struct ice_aqc_list_caps_elem *cap) 2566 { 2567 u32 number = le32_to_cpu(cap->number); 2568 2569 dev_p->num_funcs = hweight32(number); 2570 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2571 dev_p->num_funcs); 2572 2573 hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); 2574 } 2575 2576 /** 2577 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2578 * @hw: pointer to the HW struct 2579 * @dev_p: pointer to device capabilities structure 2580 * @cap: capability element to parse 2581 * 2582 * Parse ICE_AQC_CAPS_VF for device capabilities. 2583 */ 2584 static void 2585 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2586 struct ice_aqc_list_caps_elem *cap) 2587 { 2588 u32 number = le32_to_cpu(cap->number); 2589 2590 dev_p->num_vfs_exposed = number; 2591 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2592 dev_p->num_vfs_exposed); 2593 } 2594 2595 /** 2596 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2597 * @hw: pointer to the HW struct 2598 * @dev_p: pointer to device capabilities structure 2599 * @cap: capability element to parse 2600 * 2601 * Parse ICE_AQC_CAPS_VSI for device capabilities. 
2602 */ 2603 static void 2604 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2605 struct ice_aqc_list_caps_elem *cap) 2606 { 2607 u32 number = le32_to_cpu(cap->number); 2608 2609 dev_p->num_vsi_allocd_to_host = number; 2610 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2611 dev_p->num_vsi_allocd_to_host); 2612 } 2613 2614 /** 2615 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2616 * @hw: pointer to the HW struct 2617 * @dev_p: pointer to device capabilities structure 2618 * @cap: capability element to parse 2619 * 2620 * Parse ICE_AQC_CAPS_1588 for device capabilities. 2621 */ 2622 static void 2623 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2624 struct ice_aqc_list_caps_elem *cap) 2625 { 2626 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2627 u32 logical_id = le32_to_cpu(cap->logical_id); 2628 u32 phys_id = le32_to_cpu(cap->phys_id); 2629 u32 number = le32_to_cpu(cap->number); 2630 2631 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2632 dev_p->common_cap.ieee_1588 = info->ena; 2633 2634 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2635 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2636 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2637 2638 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2639 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2640 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2641 2642 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2643 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2644 info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0); 2645 2646 info->ena_ports = logical_id; 2647 info->tmr_own_map = phys_id; 2648 2649 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2650 dev_p->common_cap.ieee_1588); 2651 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2652 info->tmr0_owner); 2653 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2654 info->tmr0_owned); 2655 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2656 info->tmr0_ena); 2657 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2658 info->tmr1_owner); 2659 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2660 info->tmr1_owned); 2661 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2662 info->tmr1_ena); 2663 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2664 info->ts_ll_read); 2665 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2666 info->ts_ll_int_read); 2667 ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n", 2668 info->ll_phy_tmr_update); 2669 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2670 info->ena_ports); 2671 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2672 info->tmr_own_map); 2673 } 2674 2675 /** 2676 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2677 * @hw: pointer to the HW struct 2678 * @dev_p: pointer to device capabilities structure 2679 * @cap: capability element to parse 2680 * 2681 * Parse ICE_AQC_CAPS_FD for device capabilities. 
2682 */ 2683 static void 2684 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2685 struct ice_aqc_list_caps_elem *cap) 2686 { 2687 u32 number = le32_to_cpu(cap->number); 2688 2689 dev_p->num_flow_director_fltr = number; 2690 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2691 dev_p->num_flow_director_fltr); 2692 } 2693 2694 /** 2695 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2696 * @hw: pointer to the HW struct 2697 * @dev_p: pointer to device capabilities structure 2698 * @cap: capability element to parse 2699 * 2700 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2701 * enabled sensors. 2702 */ 2703 static void 2704 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2705 struct ice_aqc_list_caps_elem *cap) 2706 { 2707 dev_p->supported_sensors = le32_to_cpu(cap->number); 2708 2709 ice_debug(hw, ICE_DBG_INIT, 2710 "dev caps: supported sensors (bitmap) = 0x%x\n", 2711 dev_p->supported_sensors); 2712 } 2713 2714 /** 2715 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap 2716 * @hw: pointer to the HW struct 2717 * @dev_p: pointer to device capabilities structure 2718 * @cap: capability element to parse 2719 * 2720 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. 2721 */ 2722 static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw, 2723 struct ice_hw_dev_caps *dev_p, 2724 struct ice_aqc_list_caps_elem *cap) 2725 { 2726 dev_p->nac_topo.mode = le32_to_cpu(cap->number); 2727 dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; 2728 2729 dev_info(ice_hw_to_dev(hw), 2730 "PF is configured in %s mode with IP instance ID %d\n", 2731 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ? 2732 "primary" : "secondary", dev_p->nac_topo.id); 2733 2734 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", 2735 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); 2736 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", 2737 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); 2738 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", 2739 dev_p->nac_topo.id); 2740 } 2741 2742 /** 2743 * ice_parse_dev_caps - Parse device capabilities 2744 * @hw: pointer to the HW struct 2745 * @dev_p: pointer to device capabilities structure 2746 * @buf: buffer containing the device capability records 2747 * @cap_count: the number of capabilities 2748 * 2749 * Helper function to parse device (0x000B) capabilities list. For 2750 * capabilities shared between device and function, this relies on 2751 * ice_parse_common_caps. 2752 * 2753 * Loop through the list of provided capabilities and extract the relevant 2754 * data into the device capabilities structure.
2755 */ 2756 static void 2757 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2758 void *buf, u32 cap_count) 2759 { 2760 struct ice_aqc_list_caps_elem *cap_resp; 2761 u32 i; 2762 2763 cap_resp = buf; 2764 2765 memset(dev_p, 0, sizeof(*dev_p)); 2766 2767 for (i = 0; i < cap_count; i++) { 2768 u16 cap = le16_to_cpu(cap_resp[i].cap); 2769 bool found; 2770 2771 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2772 &cap_resp[i], "dev caps"); 2773 2774 switch (cap) { 2775 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2776 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2777 break; 2778 case ICE_AQC_CAPS_VF: 2779 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2780 break; 2781 case ICE_AQC_CAPS_VSI: 2782 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2783 break; 2784 case ICE_AQC_CAPS_1588: 2785 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2786 break; 2787 case ICE_AQC_CAPS_FD: 2788 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2789 break; 2790 case ICE_AQC_CAPS_SENSOR_READING: 2791 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2792 break; 2793 case ICE_AQC_CAPS_NAC_TOPOLOGY: 2794 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); 2795 break; 2796 default: 2797 /* Don't list common capabilities as unknown */ 2798 if (!found) 2799 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2800 i, cap); 2801 break; 2802 } 2803 } 2804 2805 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2806 } 2807 2808 /** 2809 * ice_is_phy_rclk_in_netlist 2810 * @hw: pointer to the hw struct 2811 * 2812 * Check if the PHY Recovered Clock device is present in the netlist 2813 */ 2814 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2815 { 2816 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, 2817 ICE_AQC_LINK_TOPO_NODE_CTX_PORT, 2818 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2819 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, 2820 ICE_AQC_LINK_TOPO_NODE_CTX_PORT, 2821 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2822 return false; 2823 2824 return true; 2825 } 2826 2827 /** 2828 * ice_is_clock_mux_in_netlist 2829 * @hw: pointer to the hw struct 2830 * 2831 * Check if the Clock Multiplexer device is present in the netlist 2832 */ 2833 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2834 { 2835 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2836 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2837 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2838 NULL)) 2839 return false; 2840 2841 return true; 2842 } 2843 2844 /** 2845 * ice_is_cgu_in_netlist - check for CGU presence 2846 * @hw: pointer to the hw struct 2847 * 2848 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2849 * Save the CGU part number in the hw structure for later use. 
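 * Two clock generation parts are probed for: the ZL30632_80032 and the
 * SI5383_5384.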
2850 * Return: 2851 * * true - cgu is present 2852 * * false - cgu is not present 2853 */ 2854 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2855 { 2856 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2857 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2858 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2859 NULL)) { 2860 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2861 return true; 2862 } else if (!ice_find_netlist_node(hw, 2863 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2864 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2865 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2866 NULL)) { 2867 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2868 return true; 2869 } 2870 2871 return false; 2872 } 2873 2874 /** 2875 * ice_is_gps_in_netlist 2876 * @hw: pointer to the hw struct 2877 * 2878 * Check if the GPS generic device is present in the netlist 2879 */ 2880 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2881 { 2882 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2883 ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, 2884 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2885 return false; 2886 2887 return true; 2888 } 2889 2890 /** 2891 * ice_aq_list_caps - query function/device capabilities 2892 * @hw: pointer to the HW struct 2893 * @buf: a buffer to hold the capabilities 2894 * @buf_size: size of the buffer 2895 * @cap_count: if not NULL, set to the number of capabilities reported 2896 * @opc: capabilities type to discover, device or function 2897 * @cd: pointer to command details structure or NULL 2898 * 2899 * Get the function (0x000A) or device (0x000B) capabilities description from 2900 * firmware and store it in the buffer. 2901 * 2902 * If the cap_count pointer is not NULL, then it is set to the number of 2903 * capabilities firmware will report. Note that if the buffer size is too 2904 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2905 * cap_count will still be updated in this case. It is recommended that the 2906 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2907 * firmware could return) to avoid this. 2908 */ 2909 int 2910 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2911 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2912 { 2913 struct ice_aqc_list_caps *cmd; 2914 struct ice_aq_desc desc; 2915 int status; 2916 2917 cmd = &desc.params.get_cap; 2918 2919 if (opc != ice_aqc_opc_list_func_caps && 2920 opc != ice_aqc_opc_list_dev_caps) 2921 return -EINVAL; 2922 2923 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2924 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2925 2926 if (cap_count) 2927 *cap_count = le32_to_cpu(cmd->count); 2928 2929 return status; 2930 } 2931 2932 /** 2933 * ice_discover_dev_caps - Read and extract device capabilities 2934 * @hw: pointer to the hardware structure 2935 * @dev_caps: pointer to device capabilities structure 2936 * 2937 * Read the device capabilities and extract them into the dev_caps structure 2938 * for later use. 2939 */ 2940 int 2941 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2942 { 2943 u32 cap_count = 0; 2944 void *cbuf; 2945 int status; 2946 2947 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2948 if (!cbuf) 2949 return -ENOMEM; 2950 2951 /* Although the driver doesn't know the number of capabilities the 2952 * device will return, we can simply send a 4KB buffer, the maximum 2953 * possible size that firmware can return. 
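 * The worst-case element count computed below follows directly from that
 * buffer size.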
2954 */ 2955 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2956 2957 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2958 ice_aqc_opc_list_dev_caps, NULL); 2959 if (!status) 2960 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2961 kfree(cbuf); 2962 2963 return status; 2964 } 2965 2966 /** 2967 * ice_discover_func_caps - Read and extract function capabilities 2968 * @hw: pointer to the hardware structure 2969 * @func_caps: pointer to function capabilities structure 2970 * 2971 * Read the function capabilities and extract them into the func_caps structure 2972 * for later use. 2973 */ 2974 static int 2975 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2976 { 2977 u32 cap_count = 0; 2978 void *cbuf; 2979 int status; 2980 2981 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2982 if (!cbuf) 2983 return -ENOMEM; 2984 2985 /* Although the driver doesn't know the number of capabilities the 2986 * device will return, we can simply send a 4KB buffer, the maximum 2987 * possible size that firmware can return. 2988 */ 2989 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2990 2991 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2992 ice_aqc_opc_list_func_caps, NULL); 2993 if (!status) 2994 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2995 kfree(cbuf); 2996 2997 return status; 2998 } 2999 3000 /** 3001 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 3002 * @hw: pointer to the hardware structure 3003 */ 3004 void ice_set_safe_mode_caps(struct ice_hw *hw) 3005 { 3006 struct ice_hw_func_caps *func_caps = &hw->func_caps; 3007 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 3008 struct ice_hw_common_caps cached_caps; 3009 u32 num_funcs; 3010 3011 /* cache some func_caps values that should be restored after memset */ 3012 cached_caps = func_caps->common_cap; 3013 3014 /* unset func capabilities */ 3015 memset(func_caps, 0, sizeof(*func_caps)); 3016 3017 #define ICE_RESTORE_FUNC_CAP(name) \ 3018 func_caps->common_cap.name = cached_caps.name 3019 3020 /* restore cached values */ 3021 ICE_RESTORE_FUNC_CAP(valid_functions); 3022 ICE_RESTORE_FUNC_CAP(txq_first_id); 3023 ICE_RESTORE_FUNC_CAP(rxq_first_id); 3024 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 3025 ICE_RESTORE_FUNC_CAP(max_mtu); 3026 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 3027 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 3028 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 3029 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 3030 3031 /* one Tx and one Rx queue in safe mode */ 3032 func_caps->common_cap.num_rxq = 1; 3033 func_caps->common_cap.num_txq = 1; 3034 3035 /* two MSIX vectors, one for traffic and one for misc causes */ 3036 func_caps->common_cap.num_msix_vectors = 2; 3037 func_caps->guar_num_vsi = 1; 3038 3039 /* cache some dev_caps values that should be restored after memset */ 3040 cached_caps = dev_caps->common_cap; 3041 num_funcs = dev_caps->num_funcs; 3042 3043 /* unset dev capabilities */ 3044 memset(dev_caps, 0, sizeof(*dev_caps)); 3045 3046 #define ICE_RESTORE_DEV_CAP(name) \ 3047 dev_caps->common_cap.name = cached_caps.name 3048 3049 /* restore cached values */ 3050 ICE_RESTORE_DEV_CAP(valid_functions); 3051 ICE_RESTORE_DEV_CAP(txq_first_id); 3052 ICE_RESTORE_DEV_CAP(rxq_first_id); 3053 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 3054 ICE_RESTORE_DEV_CAP(max_mtu); 3055 ICE_RESTORE_DEV_CAP(nvm_unified_update); 3056 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 3057 
ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 3058 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 3059 dev_caps->num_funcs = num_funcs; 3060 3061 /* one Tx and one Rx queue per function in safe mode */ 3062 dev_caps->common_cap.num_rxq = num_funcs; 3063 dev_caps->common_cap.num_txq = num_funcs; 3064 3065 /* two MSIX vectors per function */ 3066 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 3067 } 3068 3069 /** 3070 * ice_get_caps - get info about the HW 3071 * @hw: pointer to the hardware structure 3072 */ 3073 int ice_get_caps(struct ice_hw *hw) 3074 { 3075 int status; 3076 3077 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3078 if (status) 3079 return status; 3080 3081 return ice_discover_func_caps(hw, &hw->func_caps); 3082 } 3083 3084 /** 3085 * ice_aq_manage_mac_write - manage MAC address write command 3086 * @hw: pointer to the HW struct 3087 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3088 * @flags: flags to control write behavior 3089 * @cd: pointer to command details structure or NULL 3090 * 3091 * This function is used to write MAC address to the NVM (0x0108). 3092 */ 3093 int 3094 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3095 struct ice_sq_cd *cd) 3096 { 3097 struct ice_aqc_manage_mac_write *cmd; 3098 struct ice_aq_desc desc; 3099 3100 cmd = &desc.params.mac_write; 3101 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3102 3103 cmd->flags = flags; 3104 ether_addr_copy(cmd->mac_addr, mac_addr); 3105 3106 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3107 } 3108 3109 /** 3110 * ice_aq_clear_pxe_mode 3111 * @hw: pointer to the HW struct 3112 * 3113 * Tell the firmware that the driver is taking over from PXE (0x0110). 3114 */ 3115 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3116 { 3117 struct ice_aq_desc desc; 3118 3119 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3120 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3121 3122 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3123 } 3124 3125 /** 3126 * ice_clear_pxe_mode - clear pxe operations mode 3127 * @hw: pointer to the HW struct 3128 * 3129 * Make sure all PXE mode settings are cleared, including things 3130 * like descriptor fetch/write-back mode. 3131 */ 3132 void ice_clear_pxe_mode(struct ice_hw *hw) 3133 { 3134 if (ice_check_sq_alive(hw, &hw->adminq)) 3135 ice_aq_clear_pxe_mode(hw); 3136 } 3137 3138 /** 3139 * ice_aq_set_port_params - set physical port parameters. 
3140 * @pi: pointer to the port info struct 3141 * @double_vlan: if set double VLAN is enabled 3142 * @cd: pointer to command details structure or NULL 3143 * 3144 * Set Physical port parameters (0x0203) 3145 */ 3146 int 3147 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3148 struct ice_sq_cd *cd) 3149 3150 { 3151 struct ice_aqc_set_port_params *cmd; 3152 struct ice_hw *hw = pi->hw; 3153 struct ice_aq_desc desc; 3154 u16 cmd_flags = 0; 3155 3156 cmd = &desc.params.set_port_params; 3157 3158 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3159 if (double_vlan) 3160 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3161 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3162 3163 cmd->local_fwd_mode = pi->local_fwd_mode | 3164 ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID; 3165 3166 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3167 } 3168 3169 /** 3170 * ice_is_100m_speed_supported 3171 * @hw: pointer to the HW struct 3172 * 3173 * returns true if 100M speeds are supported by the device, 3174 * false otherwise. 3175 */ 3176 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3177 { 3178 switch (hw->device_id) { 3179 case ICE_DEV_ID_E822C_SGMII: 3180 case ICE_DEV_ID_E822L_SGMII: 3181 case ICE_DEV_ID_E823L_1GBE: 3182 case ICE_DEV_ID_E823C_SGMII: 3183 return true; 3184 default: 3185 return false; 3186 } 3187 } 3188 3189 /** 3190 * ice_get_link_speed_based_on_phy_type - returns link speed 3191 * @phy_type_low: lower part of phy_type 3192 * @phy_type_high: higher part of phy_type 3193 * 3194 * This helper function will convert an entry in PHY type structure 3195 * [phy_type_low, phy_type_high] to its corresponding link speed. 3196 * Note: In the structure of [phy_type_low, phy_type_high], there should 3197 * be one bit set, as this function will convert one PHY type to its 3198 * speed. 
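 * For example, passing ICE_PHY_TYPE_LOW_25GBASE_CR as phy_type_low (with
 * phy_type_high zero) yields ICE_AQ_LINK_SPEED_25GB.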
3199 * 3200 * Return: 3201 * * PHY speed for recognized PHY type 3202 * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3203 * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3204 */ 3205 u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3206 { 3207 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3208 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3209 3210 switch (phy_type_low) { 3211 case ICE_PHY_TYPE_LOW_100BASE_TX: 3212 case ICE_PHY_TYPE_LOW_100M_SGMII: 3213 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3214 break; 3215 case ICE_PHY_TYPE_LOW_1000BASE_T: 3216 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3217 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3218 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3219 case ICE_PHY_TYPE_LOW_1G_SGMII: 3220 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3221 break; 3222 case ICE_PHY_TYPE_LOW_2500BASE_T: 3223 case ICE_PHY_TYPE_LOW_2500BASE_X: 3224 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3225 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3226 break; 3227 case ICE_PHY_TYPE_LOW_5GBASE_T: 3228 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3229 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3230 break; 3231 case ICE_PHY_TYPE_LOW_10GBASE_T: 3232 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3233 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3234 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3235 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3236 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3237 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3238 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3239 break; 3240 case ICE_PHY_TYPE_LOW_25GBASE_T: 3241 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3242 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3243 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3244 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3245 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3246 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3247 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3248 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3249 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3250 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3251 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3252 break; 3253 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3254 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3255 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3256 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3257 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3258 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3259 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3260 break; 3261 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3262 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3263 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3264 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3265 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3266 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3267 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3268 case ICE_PHY_TYPE_LOW_50G_AUI2: 3269 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3270 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3271 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3272 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3273 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3274 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3275 case ICE_PHY_TYPE_LOW_50G_AUI1: 3276 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3277 break; 3278 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3279 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3280 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3281 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3282 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3283 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3284 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3285 case ICE_PHY_TYPE_LOW_100G_AUI4: 3286 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3287 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3288 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3289 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3290 case ICE_PHY_TYPE_LOW_100GBASE_DR: 
3291 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3292 break; 3293 default: 3294 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3295 break; 3296 } 3297 3298 switch (phy_type_high) { 3299 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 3300 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 3301 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 3302 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 3303 case ICE_PHY_TYPE_HIGH_100G_AUI2: 3304 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 3305 break; 3306 case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4: 3307 case ICE_PHY_TYPE_HIGH_200G_SR4: 3308 case ICE_PHY_TYPE_HIGH_200G_FR4: 3309 case ICE_PHY_TYPE_HIGH_200G_LR4: 3310 case ICE_PHY_TYPE_HIGH_200G_DR4: 3311 case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4: 3312 case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC: 3313 case ICE_PHY_TYPE_HIGH_200G_AUI4: 3314 speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB; 3315 break; 3316 default: 3317 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3318 break; 3319 } 3320 3321 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 3322 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3323 return ICE_AQ_LINK_SPEED_UNKNOWN; 3324 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3325 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 3326 return ICE_AQ_LINK_SPEED_UNKNOWN; 3327 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3328 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3329 return speed_phy_type_low; 3330 else 3331 return speed_phy_type_high; 3332 } 3333 3334 /** 3335 * ice_update_phy_type 3336 * @phy_type_low: pointer to the lower part of phy_type 3337 * @phy_type_high: pointer to the higher part of phy_type 3338 * @link_speeds_bitmap: targeted link speeds bitmap 3339 * 3340 * Note: For the link_speeds_bitmap format, see 3341 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a 3342 * link_speeds_bitmap that includes multiple speeds. 3343 * 3344 * Each entry in this [phy_type_low, phy_type_high] structure will 3345 * represent a certain link speed. This helper function will turn on bits 3346 * in the [phy_type_low, phy_type_high] structure based on the value of 3347 * the link_speeds_bitmap input parameter. 3348 */ 3349 void 3350 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3351 u16 link_speeds_bitmap) 3352 { 3353 u64 pt_high; 3354 u64 pt_low; 3355 int index; 3356 u16 speed; 3357 3358 /* We first check with low part of phy_type */ 3359 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3360 pt_low = BIT_ULL(index); 3361 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3362 3363 if (link_speeds_bitmap & speed) 3364 *phy_type_low |= BIT_ULL(index); 3365 } 3366 3367 /* We then check with high part of phy_type */ 3368 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3369 pt_high = BIT_ULL(index); 3370 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3371 3372 if (link_speeds_bitmap & speed) 3373 *phy_type_high |= BIT_ULL(index); 3374 } 3375 } 3376 3377 /** 3378 * ice_aq_set_phy_cfg 3379 * @hw: pointer to the HW struct 3380 * @pi: port info structure of the interested logical port 3381 * @cfg: structure with PHY configuration data to be set 3382 * @cd: pointer to command details structure or NULL 3383 * 3384 * Set the various PHY configuration parameters supported on the Port. 3385 * One or more of the Set PHY config parameters may be ignored in an MFP 3386 * mode as the PF may not have the privilege to set some of the PHY Config 3387 * parameters. This status will be indicated by the command response (0x0601).
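 * Note that an ICE_AQ_RC_EMODE response (no permission to change the PHY
 * configuration in the current mode) is treated as success by this wrapper.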
3388 */ 3389 int 3390 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3391 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3392 { 3393 struct ice_aq_desc desc; 3394 int status; 3395 3396 if (!cfg) 3397 return -EINVAL; 3398 3399 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3400 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3401 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3402 cfg->caps); 3403 3404 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3405 } 3406 3407 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3408 desc.params.set_phy.lport_num = pi->lport; 3409 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3410 3411 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3412 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3413 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3414 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3415 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3416 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3417 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3418 cfg->low_power_ctrl_an); 3419 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3420 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3421 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3422 cfg->link_fec_opt); 3423 3424 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3425 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3426 status = 0; 3427 3428 if (!status) 3429 pi->phy.curr_user_phy_cfg = *cfg; 3430 3431 return status; 3432 } 3433 3434 /** 3435 * ice_update_link_info - update status of the HW network link 3436 * @pi: port info structure of the interested logical port 3437 */ 3438 int ice_update_link_info(struct ice_port_info *pi) 3439 { 3440 struct ice_link_status *li; 3441 int status; 3442 3443 if (!pi) 3444 return -EINVAL; 3445 3446 li = &pi->phy.link_info; 3447 3448 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3449 if (status) 3450 return status; 3451 3452 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3453 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3454 3455 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3456 if (!pcaps) 3457 return -ENOMEM; 3458 3459 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3460 pcaps, NULL); 3461 } 3462 3463 return status; 3464 } 3465 3466 /** 3467 * ice_aq_get_phy_equalization - function to read serdes equaliser 3468 * value from firmware using admin queue command. 3469 * @hw: pointer to the HW struct 3470 * @data_in: represents the serdes equalization parameter requested 3471 * @op_code: represents the serdes number and flag to represent tx or rx 3472 * @serdes_num: represents the serdes number 3473 * @output: pointer to the caller-supplied buffer to return serdes equaliser 3474 * 3475 * Return: non-zero status on error and 0 on success. 
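 * Only the low four bits of @serdes_num are used when encoding the request.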
3476 */ 3477 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, 3478 u8 serdes_num, int *output) 3479 { 3480 struct ice_aqc_dnl_call_command *cmd; 3481 struct ice_aqc_dnl_call buf = {}; 3482 struct ice_aq_desc desc; 3483 int err; 3484 3485 buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); 3486 buf.sto.txrx_equa_reqs.op_code_serdes_sel = 3487 cpu_to_le16(op_code | (serdes_num & 0xF)); 3488 cmd = &desc.params.dnl_call; 3489 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); 3490 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | 3491 ICE_AQ_FLAG_RD | 3492 ICE_AQ_FLAG_SI); 3493 desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); 3494 cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); 3495 3496 err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), 3497 NULL); 3498 *output = err ? 0 : buf.sto.txrx_equa_resp.val; 3499 3500 return err; 3501 } 3502 3503 #define FEC_REG_PORT(port) { \ 3504 FEC_CORR_LOW_REG_PORT##port, \ 3505 FEC_CORR_HIGH_REG_PORT##port, \ 3506 FEC_UNCORR_LOW_REG_PORT##port, \ 3507 FEC_UNCORR_HIGH_REG_PORT##port, \ 3508 } 3509 3510 static const u32 fec_reg[][ICE_FEC_MAX] = { 3511 FEC_REG_PORT(0), 3512 FEC_REG_PORT(1), 3513 FEC_REG_PORT(2), 3514 FEC_REG_PORT(3) 3515 }; 3516 3517 /** 3518 * ice_aq_get_fec_stats - reads fec stats from phy 3519 * @hw: pointer to the HW struct 3520 * @pcs_quad: represents pcsquad of user input serdes 3521 * @pcs_port: represents the pcs port number part of above pcs quad 3522 * @fec_type: represents FEC stats type 3523 * @output: pointer to the caller-supplied buffer to return requested fec stats 3524 * 3525 * Return: non-zero status on error and 0 on success. 3526 */ 3527 int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, 3528 enum ice_fec_stats_types fec_type, u32 *output) 3529 { 3530 u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); 3531 struct ice_sbq_msg_input msg = {}; 3532 u32 receiver_id, reg_offset; 3533 int err; 3534 3535 if (pcs_port > 3) 3536 return -EINVAL; 3537 3538 reg_offset = fec_reg[pcs_port][fec_type]; 3539 3540 if (pcs_quad == 0) 3541 receiver_id = FEC_RECEIVER_ID_PCS0; 3542 else if (pcs_quad == 1) 3543 receiver_id = FEC_RECEIVER_ID_PCS1; 3544 else 3545 return -EINVAL; 3546 3547 msg.msg_addr_low = lower_16_bits(reg_offset); 3548 msg.msg_addr_high = receiver_id; 3549 msg.opcode = ice_sbq_msg_rd; 3550 msg.dest_dev = rmn_0; 3551 3552 err = ice_sbq_rw_reg(hw, &msg, flag); 3553 if (err) 3554 return err; 3555 3556 *output = msg.data; 3557 return 0; 3558 } 3559 3560 /** 3561 * ice_cache_phy_user_req 3562 * @pi: port information structure 3563 * @cache_data: PHY logging data 3564 * @cache_mode: PHY logging mode 3565 * 3566 * Log the user request on (FC, FEC, SPEED) for later use. 
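 *
 * Illustrative usage when caching a flow control request (a sketch; mirrors
 * the call made by ice_cfg_phy_fc() below):
 *
 *	struct ice_phy_cache_mode_data cache_data;
 *
 *	cache_data.data.curr_user_fc_req = ICE_FC_FULL;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);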

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return -EINVAL;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}
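
/* Illustrative mapping (not part of the driver): the two pause capability
 * bits and the ice FC mode they translate to, in both directions (see
 * ice_caps_to_fc_mode() and ice_cfg_phy_fc() above):
 *
 *	EN_TX_LINK_PAUSE  EN_RX_LINK_PAUSE  ice_fc_mode
 *	        0                 0         ICE_FC_NONE
 *	        1                 0         ICE_FC_TX_PAUSE
 *	        0                 1         ICE_FC_RX_PAUSE
 *	        1                 1         ICE_FC_FULL
 */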

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	return status;
}
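
/* Illustrative sketch (not part of the driver): a caller can use aq_failures
 * to tell which stage of ice_set_fc() failed, since the returned status alone
 * does not distinguish them.
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_failures, true)) {
 *		if (aq_failures == ICE_SET_FC_AQ_FAIL_GET)
 *			...	// reading current PHY caps failed
 *		else if (aq_failures == ICE_SET_FC_AQ_FAIL_SET)
 *			...	// applying the new config failed
 *		else if (aq_failures == ICE_SET_FC_AQ_FAIL_UPDATE)
 *			...	// link info did not refresh in time
 *	}
 */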

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}
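
/* Illustrative sketch (not part of the driver): the two helpers above are
 * typically paired; a config is derived from a caps read, modified, and only
 * sent if it actually differs, which skips no-op PHY updates:
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.link_fec_opt = ...;		// change only what is needed
 *	if (!ice_phy_caps_equals_cfg(pcaps, &cfg))
 *		ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */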

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear the RS bits, AND in the BASE-R ability bits and OR
		 * in the BASE-R request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear the BASE-R bits, AND in the RS ability bits and OR
		 * in the RS request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND in the auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}
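
/* Illustrative sketch (not part of the driver): polling link state after a
 * reconfiguration. link_up is only meaningful when the call succeeds.
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		...	// link is up, safe to report carrier
 */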

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *                output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *               lport; must be true when the PF owns more than one port
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}

/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *               lport; must be true when the PF owns more than one port
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
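
/* Illustrative sketch (not part of the driver): querying the available port
 * options and selecting a different one. The new option only takes effect
 * after the appropriate reset, which is outside this sketch; wanted_idx is a
 * hypothetical index chosen by the caller.
 *
 *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_MAX];
 *	u8 cnt = ICE_AQC_PORT_OPT_MAX, active, pending;
 *	bool active_valid, pending_valid;
 *
 *	if (!ice_aq_get_port_options(hw, opts, &cnt, 0, false, &active,
 *				     &active_valid, &pending, &pending_valid) &&
 *	    active_valid && active != wanted_idx)
 *		ice_aq_set_port_option(hw, 0, false, wanted_idx);
 */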

/**
 * ice_get_phy_lane_number - Get PHY lane number for current adapter
 * @hw: pointer to the hw struct
 *
 * Return: PHY lane number on success, negative error code otherwise.
 */
int ice_get_phy_lane_number(struct ice_hw *hw)
{
	struct ice_aqc_get_port_options_elem *options;
	unsigned int lport = 0;
	unsigned int lane;
	int err;

	options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
		u8 options_count = ICE_AQC_PORT_OPT_MAX;
		u8 speed, active_idx, pending_idx;
		bool active_valid, pending_valid;

		err = ice_aq_get_port_options(hw, options, &options_count, lane,
					      true, &active_idx, &active_valid,
					      &pending_idx, &pending_valid);
		if (err)
			goto err;

		if (!active_valid)
			continue;

		speed = options[active_idx].max_lane_speed;
		/* If we don't get speed for this lane, it's unoccupied */
		if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
			continue;

		if (hw->pf_id == lport) {
			kfree(options);
			return lane;
		}

		lport++;
	}

	/* PHY lane not found */
	err = -ENXIO;
err:
	kfree(options);
	return err;
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. The lower 8 bits hold the address; the upper 8 bits
 *            must be zero.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	u16 i2c_bus_addr;
	int status;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
	if (write)
		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
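
/* Illustrative sketch (not part of the driver): reading the first 16 bytes of
 * a module's EEPROM page 0 at the conventional 0xA0 address, on the port
 * owned by this PF (bit 8 of lport clear means "lport not valid").
 *
 *	u8 id[16];
 *
 *	if (!ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id, sizeof(id),
 *			       false, NULL))
 *		...	// id[0] now holds the SFF-8024 identifier byte
 */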
passed"); 4282 return 0; 4283 } 4284 4285 /** 4286 * __ice_aq_get_set_rss_lut 4287 * @hw: pointer to the hardware structure 4288 * @params: RSS LUT parameters 4289 * @set: set true to set the table, false to get the table 4290 * 4291 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4292 */ 4293 static int 4294 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 4295 struct ice_aq_get_set_rss_lut_params *params, bool set) 4296 { 4297 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 4298 enum ice_lut_type lut_type = params->lut_type; 4299 struct ice_aqc_get_set_rss_lut *desc_params; 4300 enum ice_aqc_lut_flags flags; 4301 enum ice_lut_size lut_size; 4302 struct ice_aq_desc desc; 4303 u8 *lut = params->lut; 4304 4305 4306 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4307 return -EINVAL; 4308 4309 lut_size = ice_lut_type_to_size(lut_type); 4310 if (lut_size > params->lut_size) 4311 return -EINVAL; 4312 else if (set && lut_size != params->lut_size) 4313 return -EINVAL; 4314 4315 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4316 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4317 if (set) 4318 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4319 4320 desc_params = &desc.params.get_set_rss_lut; 4321 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4322 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4323 4324 if (lut_type == ICE_LUT_GLOBAL) 4325 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4326 params->global_lut_id); 4327 4328 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4329 desc_params->flags = cpu_to_le16(flags); 4330 4331 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4332 } 4333 4334 /** 4335 * ice_aq_get_rss_lut 4336 * @hw: pointer to the hardware structure 4337 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4338 * 4339 * get the RSS lookup table, PF or VSI type 4340 */ 4341 int 4342 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4343 { 4344 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4345 } 4346 4347 /** 4348 * ice_aq_set_rss_lut 4349 * @hw: pointer to the hardware structure 4350 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4351 * 4352 * set the RSS lookup table, PF or VSI type 4353 */ 4354 int 4355 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4356 { 4357 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4358 } 4359 4360 /** 4361 * __ice_aq_get_set_rss_key 4362 * @hw: pointer to the HW struct 4363 * @vsi_id: VSI FW index 4364 * @key: pointer to key info struct 4365 * @set: set true to set the key, false to get the key 4366 * 4367 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4368 */ 4369 static int 4370 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4371 struct ice_aqc_get_set_rss_keys *key, bool set) 4372 { 4373 struct ice_aqc_get_set_rss_key *desc_params; 4374 u16 key_size = sizeof(*key); 4375 struct ice_aq_desc desc; 4376 4377 if (set) { 4378 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4379 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4380 } else { 4381 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4382 } 4383 4384 desc_params = &desc.params.get_set_rss_key; 4385 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4386 4387 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4388 } 4389 4390 /** 4391 * ice_aq_get_rss_key 4392 * @hw: pointer to the HW struct 4393 * 

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue, initialize the following as part of
 * the Tx queue context: completion queue ID (if the queue uses a completion
 * queue), quanta profile, cache profile and packet shaper profile.
 *
 * After the add Tx LAN queue AQ command completes, interrupts should be
 * associated with specific queues. Associating a Tx queue with a doorbell
 * queue is not part of the add LAN Tx queue flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
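
/* Illustrative note (not part of the driver): the validation loop above walks
 * a packed sequence of variable-length groups, so for two groups with 1 and 3
 * queues the expected buf_size works out as
 *
 *	sum_size = struct_size(g0, txqs, 1)	// header + 1 queue element
 *		 + struct_size(g1, txqs, 3);	// header + 3 queue elements
 *
 * i.e. each group contributes its own header plus num_txqs trailing elements,
 * and the next group starts immediately after the previous one's last element.
 */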

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 vmvf_and_timeout;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
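
/* Illustrative note (not part of the driver): each disable-queue item is a
 * 6-byte header (parent TEID, num_qs, one reserved byte) followed by num_qs
 * 16-bit queue IDs. With an odd num_qs the item size is already a multiple
 * of 4 (6 + 2 * 1 = 8, 6 + 2 * 3 = 12, ...); with an even num_qs it lands on
 * a 2-byte boundary (6 + 2 * 2 = 10), hence the 2 bytes of padding added in
 * the sizing loop above, presumably to keep each item 4-byte aligned for FW.
 */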

/**
 * ice_aq_cfg_lan_txq
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * Move/Configure LAN Tx queue (0x0C32)
 *
 * There is a better AQ command to use for moving nodes, so this one is only
 * used for configuring the node.
 */
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_txqs *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.cfg_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (!buf)
		return -EINVAL;

	cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
	cmd->num_qs = num_qs;
	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
	cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
	cmd->blocked_cgds = 0;

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
			  hw->adminq.sq_last_status);
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
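
/* Illustrative note (not part of the driver): the "generic" byte described
 * in the comment above packs its fields as
 *
 *	bit 0    - scheduling mode (0 = BPS)
 *	bits 1-3 - priority among siblings
 *	bit 4    - WFQ selection
 *	bits 5-6 - PSM credit adjustment
 *	bit 7    - reserved
 *
 * so leaving info.generic at 0 selects BPS, priority 0, WFQ and no
 * adjustment, while ICE_AQC_ELEM_VALID_GENERIC tells FW the byte is
 * intentional rather than uninitialized.
 */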

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queue is already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
		q_ctx->q_teid = ICE_INVAL_TEID;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qset
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}

/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @dpll_state: current DPLL state
 * @config: current DPLL config
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = sign_extend64(*phase_offset, 47);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
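
/* Illustrative note (not part of the driver): the 48-bit signed phase offset
 * assembled in ice_aq_get_cgu_dpll_status() above is split by FW into a high
 * and a low 32-bit word. For example, a raw value of 0xFFFFFFFFFFFF (-1 in
 * 48-bit two's complement) arrives as phase_offset_h = 0xFFFF and
 * phase_offset_l = 0xFFFFFFFF:
 *
 *	offset = 0xFFFF;			// from phase_offset_h
 *	offset <<= 32;				// 0x0000FFFF00000000
 *	offset += 0xFFFFFFFF;			// 0x0000FFFFFFFFFFFF
 *	offset = sign_extend64(offset, 47);	// 0xFFFFFFFFFFFFFFFF == -1
 */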

/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}
/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency (in/out, updated with the applied value)
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}

/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: PHY output node handle
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading - get sensor reading
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 *
 * Get sensor reading (0x0632)
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there are any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries to the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules
	 * list, which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}
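/*
 * Illustrative ordering sketch, not part of the driver: the main VSI
 * must be replayed first so that ice_replay_pre_init() runs before any
 * other handle is restored. "num_handles" is a hypothetical caller-
 * supplied bound, and the helper name is hypothetical as well.
 */
static inline int ice_example_replay_all(struct ice_hw *hw, u16 num_handles)
{
	u16 handle;
	int err;

	/* main VSI first; this triggers the one-time pre-init */
	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (err)
		return err;

	for (handle = 0; handle < num_handles; handle++) {
		if (handle == ICE_MAIN_VSI_HANDLE ||
		    !ice_is_vsi_valid(hw, handle))
			continue;

		err = ice_replay_vsi(hw, handle);
		if (err)
			return err;
	}

	return 0;
}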
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
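/*
 * Illustrative sketch, not part of the driver: the generic form of the
 * wrap-around delta used by ice_stat_update40() above and
 * ice_stat_update32() below. For example, with width = 40,
 * prev = BIT_ULL(40) - 2 and cur = 5, the counter wrapped and the delta
 * is 5 + 2^40 - prev = 7. The helper name is hypothetical.
 */
static inline u64 ice_example_stat_delta(u64 cur, u64 prev,
					 unsigned int width)
{
	if (cur >= prev)
		return cur - prev;

	/* the counter wrapped past 2^width since the previous read */
	return (cur + BIT_ULL(width)) - prev;
}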
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read data from an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
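/*
 * Illustrative sketch, not part of the driver: pack the data-size field
 * of @params and read two bytes over I2C. The 7-bit bus address 0x50 is
 * a made-up example, the remaining @params bits are left at their zero
 * defaults, and the helper name is hypothetical. @buf must hold at
 * least two bytes.
 */
static inline int ice_example_i2c_read2(struct ice_hw *hw,
					struct ice_aqc_link_topo_addr topo,
					u8 *buf)
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);

	return ice_aq_read_i2c(hw, topo, 0x50, cpu_to_le16(0), params,
			       buf, NULL);
}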
/**
 * ice_aq_write_i2c - write data to an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0 - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}
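/*
 * Illustrative read-modify-write, not part of the driver: invert one
 * GPIO pin using the two wrappers above. The helper name is
 * hypothetical; @gpio_ctrl_handle and @pin_idx come from the topology.
 */
static inline int ice_example_gpio_toggle(struct ice_hw *hw,
					  u16 gpio_ctrl_handle, u8 pin_idx)
{
	bool val;
	int err;

	err = ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, &val, NULL);
	if (err)
		return err;

	return ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, !val, NULL);
}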
/**
 * ice_is_fw_api_min_ver - check that the FW API meets a minimum version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given minimum version
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override - read the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override low PHY types.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override high PHY types.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_is_fw_health_report_supported - checks if firmware supports health events
 * @hw: pointer to the hardware structure
 *
 * Return: true if firmware supports health status reports,
 * false otherwise
 */
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
				     ICE_FW_API_HEALTH_REPORT_MIN,
				     ICE_FW_API_HEALTH_REPORT_PATCH);
}
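/*
 * Illustrative sketch, not part of the driver: semantics of
 * ice_is_fw_api_min_ver() for a hypothetical 1.7.9 floor. API versions
 * 1.7.9, 1.8.0 and 2.0.0 all pass; 1.7.8 and 1.6.9 do not, because the
 * minor/patch comparison only applies within the same major version.
 */
static inline bool ice_example_fw_api_gate(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, 1, 7, 9);
}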
/**
 * ice_aq_set_health_status_cfg - Configure FW health events
 * @hw: pointer to the HW struct
 * @event_source: type of diagnostic events to enable
 *
 * Configure the health status event types that the firmware will send to this
 * PF. The supported event types are: PF-specific, all PFs, and global.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
{
	struct ice_aqc_set_health_status_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_health_status_cfg;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);

	cmd->event_source = event_source;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer containing the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
/* Each index into the following array matches the speed of an AQ-returned
 * link_speed value in the range ICE_AQ_LINK_SPEED_10MB ..
 * ICE_AQ_LINK_SPEED_100GB, excluding ICE_AQ_LINK_SPEED_UNKNOWN, which is
 * BIT(15). link_speed is a 16-bit value with a single bit set, and the
 * array is indexed by [fls(speed) - 1]; indexes beyond the array (such as
 * the one for ICE_AQ_LINK_SPEED_UNKNOWN) yield 0 from ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
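/*
 * Illustrative sketch, not part of the driver: derive the table index
 * from a raw AQ link_speed value, which carries a single set bit. The
 * helper name is hypothetical.
 */
static inline u32 ice_example_link_speed_mbps(u16 aq_link_speed)
{
	if (!aq_link_speed)
		return 0;

	/* e.g. ICE_AQ_LINK_SPEED_25GB == BIT(7) yields index 7 -> 25000 */
	return ice_get_link_speed(fls(aq_link_speed) - 1);
}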