1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018-2023, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice_sched.h" 6 #include "ice_adminq_cmd.h" 7 #include "ice_flow.h" 8 #include "ice_ptp_hw.h" 9 10 #define ICE_PF_RESET_WAIT_COUNT 300 11 #define ICE_MAX_NETLIST_SIZE 10 12 13 static const char * const ice_link_mode_str_low[] = { 14 [0] = "100BASE_TX", 15 [1] = "100M_SGMII", 16 [2] = "1000BASE_T", 17 [3] = "1000BASE_SX", 18 [4] = "1000BASE_LX", 19 [5] = "1000BASE_KX", 20 [6] = "1G_SGMII", 21 [7] = "2500BASE_T", 22 [8] = "2500BASE_X", 23 [9] = "2500BASE_KX", 24 [10] = "5GBASE_T", 25 [11] = "5GBASE_KR", 26 [12] = "10GBASE_T", 27 [13] = "10G_SFI_DA", 28 [14] = "10GBASE_SR", 29 [15] = "10GBASE_LR", 30 [16] = "10GBASE_KR_CR1", 31 [17] = "10G_SFI_AOC_ACC", 32 [18] = "10G_SFI_C2C", 33 [19] = "25GBASE_T", 34 [20] = "25GBASE_CR", 35 [21] = "25GBASE_CR_S", 36 [22] = "25GBASE_CR1", 37 [23] = "25GBASE_SR", 38 [24] = "25GBASE_LR", 39 [25] = "25GBASE_KR", 40 [26] = "25GBASE_KR_S", 41 [27] = "25GBASE_KR1", 42 [28] = "25G_AUI_AOC_ACC", 43 [29] = "25G_AUI_C2C", 44 [30] = "40GBASE_CR4", 45 [31] = "40GBASE_SR4", 46 [32] = "40GBASE_LR4", 47 [33] = "40GBASE_KR4", 48 [34] = "40G_XLAUI_AOC_ACC", 49 [35] = "40G_XLAUI", 50 [36] = "50GBASE_CR2", 51 [37] = "50GBASE_SR2", 52 [38] = "50GBASE_LR2", 53 [39] = "50GBASE_KR2", 54 [40] = "50G_LAUI2_AOC_ACC", 55 [41] = "50G_LAUI2", 56 [42] = "50G_AUI2_AOC_ACC", 57 [43] = "50G_AUI2", 58 [44] = "50GBASE_CP", 59 [45] = "50GBASE_SR", 60 [46] = "50GBASE_FR", 61 [47] = "50GBASE_LR", 62 [48] = "50GBASE_KR_PAM4", 63 [49] = "50G_AUI1_AOC_ACC", 64 [50] = "50G_AUI1", 65 [51] = "100GBASE_CR4", 66 [52] = "100GBASE_SR4", 67 [53] = "100GBASE_LR4", 68 [54] = "100GBASE_KR4", 69 [55] = "100G_CAUI4_AOC_ACC", 70 [56] = "100G_CAUI4", 71 [57] = "100G_AUI4_AOC_ACC", 72 [58] = "100G_AUI4", 73 [59] = "100GBASE_CR_PAM4", 74 [60] = "100GBASE_KR_PAM4", 75 [61] = "100GBASE_CP2", 76 [62] = "100GBASE_SR2", 77 [63] = "100GBASE_DR", 78 }; 79 80 static 
const char * const ice_link_mode_str_high[] = { 81 [0] = "100GBASE_KR2_PAM4", 82 [1] = "100G_CAUI2_AOC_ACC", 83 [2] = "100G_CAUI2", 84 [3] = "100G_AUI2_AOC_ACC", 85 [4] = "100G_AUI2", 86 }; 87 88 /** 89 * ice_dump_phy_type - helper function to dump phy_type 90 * @hw: pointer to the HW structure 91 * @low: 64 bit value for phy_type_low 92 * @high: 64 bit value for phy_type_high 93 * @prefix: prefix string to differentiate multiple dumps 94 */ 95 static void 96 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix) 97 { 98 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low); 99 100 for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) { 101 if (low & BIT_ULL(i)) 102 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", 103 prefix, i, ice_link_mode_str_low[i]); 104 } 105 106 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high); 107 108 for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) { 109 if (high & BIT_ULL(i)) 110 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", 111 prefix, i, ice_link_mode_str_high[i]); 112 } 113 } 114 115 /** 116 * ice_set_mac_type - Sets MAC type 117 * @hw: pointer to the HW structure 118 * 119 * This function sets the MAC type of the adapter based on the 120 * vendor ID and device ID stored in the HW structure. 
121 */ 122 static int ice_set_mac_type(struct ice_hw *hw) 123 { 124 if (hw->vendor_id != PCI_VENDOR_ID_INTEL) 125 return -ENODEV; 126 127 switch (hw->device_id) { 128 case ICE_DEV_ID_E810C_BACKPLANE: 129 case ICE_DEV_ID_E810C_QSFP: 130 case ICE_DEV_ID_E810C_SFP: 131 case ICE_DEV_ID_E810_XXV_BACKPLANE: 132 case ICE_DEV_ID_E810_XXV_QSFP: 133 case ICE_DEV_ID_E810_XXV_SFP: 134 hw->mac_type = ICE_MAC_E810; 135 break; 136 case ICE_DEV_ID_E823C_10G_BASE_T: 137 case ICE_DEV_ID_E823C_BACKPLANE: 138 case ICE_DEV_ID_E823C_QSFP: 139 case ICE_DEV_ID_E823C_SFP: 140 case ICE_DEV_ID_E823C_SGMII: 141 case ICE_DEV_ID_E822C_10G_BASE_T: 142 case ICE_DEV_ID_E822C_BACKPLANE: 143 case ICE_DEV_ID_E822C_QSFP: 144 case ICE_DEV_ID_E822C_SFP: 145 case ICE_DEV_ID_E822C_SGMII: 146 case ICE_DEV_ID_E822L_10G_BASE_T: 147 case ICE_DEV_ID_E822L_BACKPLANE: 148 case ICE_DEV_ID_E822L_SFP: 149 case ICE_DEV_ID_E822L_SGMII: 150 case ICE_DEV_ID_E823L_10G_BASE_T: 151 case ICE_DEV_ID_E823L_1GBE: 152 case ICE_DEV_ID_E823L_BACKPLANE: 153 case ICE_DEV_ID_E823L_QSFP: 154 case ICE_DEV_ID_E823L_SFP: 155 hw->mac_type = ICE_MAC_GENERIC; 156 break; 157 case ICE_DEV_ID_E825C_BACKPLANE: 158 case ICE_DEV_ID_E825C_QSFP: 159 case ICE_DEV_ID_E825C_SFP: 160 case ICE_DEV_ID_E825C_SGMII: 161 hw->mac_type = ICE_MAC_GENERIC_3K_E825; 162 break; 163 case ICE_DEV_ID_E830CC_BACKPLANE: 164 case ICE_DEV_ID_E830CC_QSFP56: 165 case ICE_DEV_ID_E830CC_SFP: 166 case ICE_DEV_ID_E830CC_SFP_DD: 167 case ICE_DEV_ID_E830C_BACKPLANE: 168 case ICE_DEV_ID_E830_XXV_BACKPLANE: 169 case ICE_DEV_ID_E830C_QSFP: 170 case ICE_DEV_ID_E830_XXV_QSFP: 171 case ICE_DEV_ID_E830C_SFP: 172 case ICE_DEV_ID_E830_XXV_SFP: 173 hw->mac_type = ICE_MAC_E830; 174 break; 175 default: 176 hw->mac_type = ICE_MAC_UNKNOWN; 177 break; 178 } 179 180 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); 181 return 0; 182 } 183 184 /** 185 * ice_is_generic_mac - check if device's mac_type is generic 186 * @hw: pointer to the hardware structure 187 * 188 * Return: true if 
mac_type is generic (with SBQ support), false if not 189 */ 190 bool ice_is_generic_mac(struct ice_hw *hw) 191 { 192 return (hw->mac_type == ICE_MAC_GENERIC || 193 hw->mac_type == ICE_MAC_GENERIC_3K_E825); 194 } 195 196 /** 197 * ice_is_e810 198 * @hw: pointer to the hardware structure 199 * 200 * returns true if the device is E810 based, false if not. 201 */ 202 bool ice_is_e810(struct ice_hw *hw) 203 { 204 return hw->mac_type == ICE_MAC_E810; 205 } 206 207 /** 208 * ice_is_e810t 209 * @hw: pointer to the hardware structure 210 * 211 * returns true if the device is E810T based, false if not. 212 */ 213 bool ice_is_e810t(struct ice_hw *hw) 214 { 215 switch (hw->device_id) { 216 case ICE_DEV_ID_E810C_SFP: 217 switch (hw->subsystem_device_id) { 218 case ICE_SUBDEV_ID_E810T: 219 case ICE_SUBDEV_ID_E810T2: 220 case ICE_SUBDEV_ID_E810T3: 221 case ICE_SUBDEV_ID_E810T4: 222 case ICE_SUBDEV_ID_E810T6: 223 case ICE_SUBDEV_ID_E810T7: 224 return true; 225 } 226 break; 227 case ICE_DEV_ID_E810C_QSFP: 228 switch (hw->subsystem_device_id) { 229 case ICE_SUBDEV_ID_E810T2: 230 case ICE_SUBDEV_ID_E810T3: 231 case ICE_SUBDEV_ID_E810T5: 232 return true; 233 } 234 break; 235 default: 236 break; 237 } 238 239 return false; 240 } 241 242 /** 243 * ice_is_e822 - Check if a device is E822 family device 244 * @hw: pointer to the hardware structure 245 * 246 * Return: true if the device is E822 based, false if not. 
247 */ 248 bool ice_is_e822(struct ice_hw *hw) 249 { 250 switch (hw->device_id) { 251 case ICE_DEV_ID_E822C_BACKPLANE: 252 case ICE_DEV_ID_E822C_QSFP: 253 case ICE_DEV_ID_E822C_SFP: 254 case ICE_DEV_ID_E822C_10G_BASE_T: 255 case ICE_DEV_ID_E822C_SGMII: 256 case ICE_DEV_ID_E822L_BACKPLANE: 257 case ICE_DEV_ID_E822L_SFP: 258 case ICE_DEV_ID_E822L_10G_BASE_T: 259 case ICE_DEV_ID_E822L_SGMII: 260 return true; 261 default: 262 return false; 263 } 264 } 265 266 /** 267 * ice_is_e823 268 * @hw: pointer to the hardware structure 269 * 270 * returns true if the device is E823-L or E823-C based, false if not. 271 */ 272 bool ice_is_e823(struct ice_hw *hw) 273 { 274 switch (hw->device_id) { 275 case ICE_DEV_ID_E823L_BACKPLANE: 276 case ICE_DEV_ID_E823L_SFP: 277 case ICE_DEV_ID_E823L_10G_BASE_T: 278 case ICE_DEV_ID_E823L_1GBE: 279 case ICE_DEV_ID_E823L_QSFP: 280 case ICE_DEV_ID_E823C_BACKPLANE: 281 case ICE_DEV_ID_E823C_QSFP: 282 case ICE_DEV_ID_E823C_SFP: 283 case ICE_DEV_ID_E823C_10G_BASE_T: 284 case ICE_DEV_ID_E823C_SGMII: 285 return true; 286 default: 287 return false; 288 } 289 } 290 291 /** 292 * ice_is_e825c - Check if a device is E825C family device 293 * @hw: pointer to the hardware structure 294 * 295 * Return: true if the device is E825-C based, false if not. 296 */ 297 bool ice_is_e825c(struct ice_hw *hw) 298 { 299 switch (hw->device_id) { 300 case ICE_DEV_ID_E825C_BACKPLANE: 301 case ICE_DEV_ID_E825C_QSFP: 302 case ICE_DEV_ID_E825C_SFP: 303 case ICE_DEV_ID_E825C_SGMII: 304 return true; 305 default: 306 return false; 307 } 308 } 309 310 /** 311 * ice_clear_pf_cfg - Clear PF configuration 312 * @hw: pointer to the hardware structure 313 * 314 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port 315 * configuration, flow director filters, etc.). 
316 */ 317 int ice_clear_pf_cfg(struct ice_hw *hw) 318 { 319 struct ice_aq_desc desc; 320 321 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); 322 323 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 324 } 325 326 /** 327 * ice_aq_manage_mac_read - manage MAC address read command 328 * @hw: pointer to the HW struct 329 * @buf: a virtual buffer to hold the manage MAC read response 330 * @buf_size: Size of the virtual buffer 331 * @cd: pointer to command details structure or NULL 332 * 333 * This function is used to return per PF station MAC address (0x0107). 334 * NOTE: Upon successful completion of this command, MAC address information 335 * is returned in user specified buffer. Please interpret user specified 336 * buffer as "manage_mac_read" response. 337 * Response such as various MAC addresses are stored in HW struct (port.mac) 338 * ice_discover_dev_caps is expected to be called before this function is 339 * called. 340 */ 341 static int 342 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, 343 struct ice_sq_cd *cd) 344 { 345 struct ice_aqc_manage_mac_read_resp *resp; 346 struct ice_aqc_manage_mac_read *cmd; 347 struct ice_aq_desc desc; 348 int status; 349 u16 flags; 350 u8 i; 351 352 cmd = &desc.params.mac_read; 353 354 if (buf_size < sizeof(*resp)) 355 return -EINVAL; 356 357 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); 358 359 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 360 if (status) 361 return status; 362 363 resp = buf; 364 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; 365 366 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { 367 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); 368 return -EIO; 369 } 370 371 /* A single port can report up to two (LAN and WoL) addresses */ 372 for (i = 0; i < cmd->num_addr; i++) 373 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { 374 ether_addr_copy(hw->port_info->mac.lan_addr, 375 resp[i].mac_addr); 376 
ether_addr_copy(hw->port_info->mac.perm_addr, 377 resp[i].mac_addr); 378 break; 379 } 380 381 return 0; 382 } 383 384 /** 385 * ice_aq_get_phy_caps - returns PHY capabilities 386 * @pi: port information structure 387 * @qual_mods: report qualified modules 388 * @report_mode: report mode capabilities 389 * @pcaps: structure for PHY capabilities to be filled 390 * @cd: pointer to command details structure or NULL 391 * 392 * Returns the various PHY capabilities supported on the Port (0x0600) 393 */ 394 int 395 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, 396 struct ice_aqc_get_phy_caps_data *pcaps, 397 struct ice_sq_cd *cd) 398 { 399 struct ice_aqc_get_phy_caps *cmd; 400 u16 pcaps_size = sizeof(*pcaps); 401 struct ice_aq_desc desc; 402 const char *prefix; 403 struct ice_hw *hw; 404 int status; 405 406 cmd = &desc.params.get_phy; 407 408 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) 409 return -EINVAL; 410 hw = pi->hw; 411 412 if (report_mode == ICE_AQC_REPORT_DFLT_CFG && 413 !ice_fw_supports_report_dflt_cfg(hw)) 414 return -EINVAL; 415 416 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); 417 418 if (qual_mods) 419 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); 420 421 cmd->param0 |= cpu_to_le16(report_mode); 422 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); 423 424 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n"); 425 426 switch (report_mode) { 427 case ICE_AQC_REPORT_TOPO_CAP_MEDIA: 428 prefix = "phy_caps_media"; 429 break; 430 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: 431 prefix = "phy_caps_no_media"; 432 break; 433 case ICE_AQC_REPORT_ACTIVE_CFG: 434 prefix = "phy_caps_active"; 435 break; 436 case ICE_AQC_REPORT_DFLT_CFG: 437 prefix = "phy_caps_default"; 438 break; 439 default: 440 prefix = "phy_caps_invalid"; 441 } 442 443 ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low), 444 le64_to_cpu(pcaps->phy_type_high), prefix); 445 446 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 
0x%x\n", 447 prefix, report_mode); 448 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps); 449 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix, 450 pcaps->low_power_ctrl_an); 451 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix, 452 pcaps->eee_cap); 453 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix, 454 pcaps->eeer_value); 455 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix, 456 pcaps->link_fec_options); 457 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n", 458 prefix, pcaps->module_compliance_enforcement); 459 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n", 460 prefix, pcaps->extended_compliance_code); 461 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix, 462 pcaps->module_type[0]); 463 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix, 464 pcaps->module_type[1]); 465 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix, 466 pcaps->module_type[2]); 467 468 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { 469 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 470 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 471 memcpy(pi->phy.link_info.module_type, &pcaps->module_type, 472 sizeof(pi->phy.link_info.module_type)); 473 } 474 475 return status; 476 } 477 478 /** 479 * ice_aq_get_link_topo_handle - get link topology node return status 480 * @pi: port information structure 481 * @node_type: requested node type 482 * @cd: pointer to command details structure or NULL 483 * 484 * Get link topology node return status for specified node type (0x06E0) 485 * 486 * Node type cage can be used to determine if cage is present. If AQC 487 * returns error (ENOENT), then no cage present. If no cage present, then 488 * connection type is backplane or BASE-T. 
489 */ 490 static int 491 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, 492 struct ice_sq_cd *cd) 493 { 494 struct ice_aqc_get_link_topo *cmd; 495 struct ice_aq_desc desc; 496 497 cmd = &desc.params.get_link_topo; 498 499 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 500 501 cmd->addr.topo_params.node_type_ctx = 502 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << 503 ICE_AQC_LINK_TOPO_NODE_CTX_S); 504 505 /* set node type */ 506 cmd->addr.topo_params.node_type_ctx |= 507 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); 508 509 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 510 } 511 512 /** 513 * ice_aq_get_netlist_node 514 * @hw: pointer to the hw struct 515 * @cmd: get_link_topo AQ structure 516 * @node_part_number: output node part number if node found 517 * @node_handle: output node handle parameter if node found 518 * 519 * Get netlist node handle. 520 */ 521 int 522 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, 523 u8 *node_part_number, u16 *node_handle) 524 { 525 struct ice_aq_desc desc; 526 527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 528 desc.params.get_link_topo = *cmd; 529 530 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) 531 return -EINTR; 532 533 if (node_handle) 534 *node_handle = 535 le16_to_cpu(desc.params.get_link_topo.addr.handle); 536 if (node_part_number) 537 *node_part_number = desc.params.get_link_topo.node_part_num; 538 539 return 0; 540 } 541 542 /** 543 * ice_find_netlist_node 544 * @hw: pointer to the hw struct 545 * @node_type_ctx: type of netlist node to look for 546 * @node_part_number: node part number to look for 547 * @node_handle: output parameter if node found - optional 548 * 549 * Scan the netlist for a node handle of the given node type and part number. 550 * 551 * If node_handle is non-NULL it will be modified on function exit. It is only 552 * valid if the function returns zero, and should be ignored on any non-zero 553 * return value. 
554 * 555 * Returns: 0 if the node is found, -ENOENT if no handle was found, and 556 * a negative error code on failure to access the AQ. 557 */ 558 static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, 559 u8 node_part_number, u16 *node_handle) 560 { 561 u8 idx; 562 563 for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) { 564 struct ice_aqc_get_link_topo cmd = {}; 565 u8 rec_node_part_number; 566 int status; 567 568 cmd.addr.topo_params.node_type_ctx = 569 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, 570 node_type_ctx); 571 cmd.addr.topo_params.index = idx; 572 573 status = ice_aq_get_netlist_node(hw, &cmd, 574 &rec_node_part_number, 575 node_handle); 576 if (status) 577 return status; 578 579 if (rec_node_part_number == node_part_number) 580 return 0; 581 } 582 583 return -ENOENT; 584 } 585 586 /** 587 * ice_is_media_cage_present 588 * @pi: port information structure 589 * 590 * Returns true if media cage is present, else false. If no cage, then 591 * media type is backplane or BASE-T. 592 */ 593 static bool ice_is_media_cage_present(struct ice_port_info *pi) 594 { 595 /* Node type cage can be used to determine if cage is present. If AQC 596 * returns error (ENOENT), then no cage present. If no cage present then 597 * connection type is backplane or BASE-T. 
598 */ 599 return !ice_aq_get_link_topo_handle(pi, 600 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, 601 NULL); 602 } 603 604 /** 605 * ice_get_media_type - Gets media type 606 * @pi: port information structure 607 */ 608 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) 609 { 610 struct ice_link_status *hw_link_info; 611 612 if (!pi) 613 return ICE_MEDIA_UNKNOWN; 614 615 hw_link_info = &pi->phy.link_info; 616 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) 617 /* If more than one media type is selected, report unknown */ 618 return ICE_MEDIA_UNKNOWN; 619 620 if (hw_link_info->phy_type_low) { 621 /* 1G SGMII is a special case where some DA cable PHYs 622 * may show this as an option when it really shouldn't 623 * be since SGMII is meant to be between a MAC and a PHY 624 * in a backplane. Try to detect this case and handle it 625 */ 626 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII && 627 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == 628 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || 629 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == 630 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) 631 return ICE_MEDIA_DA; 632 633 switch (hw_link_info->phy_type_low) { 634 case ICE_PHY_TYPE_LOW_1000BASE_SX: 635 case ICE_PHY_TYPE_LOW_1000BASE_LX: 636 case ICE_PHY_TYPE_LOW_10GBASE_SR: 637 case ICE_PHY_TYPE_LOW_10GBASE_LR: 638 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 639 case ICE_PHY_TYPE_LOW_25GBASE_SR: 640 case ICE_PHY_TYPE_LOW_25GBASE_LR: 641 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 642 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 643 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 644 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 645 case ICE_PHY_TYPE_LOW_50GBASE_SR: 646 case ICE_PHY_TYPE_LOW_50GBASE_FR: 647 case ICE_PHY_TYPE_LOW_50GBASE_LR: 648 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 649 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 650 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 651 case ICE_PHY_TYPE_LOW_100GBASE_DR: 652 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 653 case 
ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 654 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 655 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 656 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 657 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 658 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 659 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 660 return ICE_MEDIA_FIBER; 661 case ICE_PHY_TYPE_LOW_100BASE_TX: 662 case ICE_PHY_TYPE_LOW_1000BASE_T: 663 case ICE_PHY_TYPE_LOW_2500BASE_T: 664 case ICE_PHY_TYPE_LOW_5GBASE_T: 665 case ICE_PHY_TYPE_LOW_10GBASE_T: 666 case ICE_PHY_TYPE_LOW_25GBASE_T: 667 return ICE_MEDIA_BASET; 668 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 669 case ICE_PHY_TYPE_LOW_25GBASE_CR: 670 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 671 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 672 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 673 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 674 case ICE_PHY_TYPE_LOW_50GBASE_CP: 675 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 676 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 677 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 678 return ICE_MEDIA_DA; 679 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 680 case ICE_PHY_TYPE_LOW_40G_XLAUI: 681 case ICE_PHY_TYPE_LOW_50G_LAUI2: 682 case ICE_PHY_TYPE_LOW_50G_AUI2: 683 case ICE_PHY_TYPE_LOW_50G_AUI1: 684 case ICE_PHY_TYPE_LOW_100G_AUI4: 685 case ICE_PHY_TYPE_LOW_100G_CAUI4: 686 if (ice_is_media_cage_present(pi)) 687 return ICE_MEDIA_DA; 688 fallthrough; 689 case ICE_PHY_TYPE_LOW_1000BASE_KX: 690 case ICE_PHY_TYPE_LOW_2500BASE_KX: 691 case ICE_PHY_TYPE_LOW_2500BASE_X: 692 case ICE_PHY_TYPE_LOW_5GBASE_KR: 693 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 694 case ICE_PHY_TYPE_LOW_25GBASE_KR: 695 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 696 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 697 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 698 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 699 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 700 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 701 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 702 return ICE_MEDIA_BACKPLANE; 703 } 704 } else { 705 switch (hw_link_info->phy_type_high) { 706 case ICE_PHY_TYPE_HIGH_100G_AUI2: 707 
case ICE_PHY_TYPE_HIGH_100G_CAUI2: 708 if (ice_is_media_cage_present(pi)) 709 return ICE_MEDIA_DA; 710 fallthrough; 711 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 712 return ICE_MEDIA_BACKPLANE; 713 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 714 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 715 return ICE_MEDIA_FIBER; 716 } 717 } 718 return ICE_MEDIA_UNKNOWN; 719 } 720 721 /** 722 * ice_get_link_status_datalen 723 * @hw: pointer to the HW struct 724 * 725 * Returns datalength for the Get Link Status AQ command, which is bigger for 726 * newer adapter families handled by ice driver. 727 */ 728 static u16 ice_get_link_status_datalen(struct ice_hw *hw) 729 { 730 switch (hw->mac_type) { 731 case ICE_MAC_E830: 732 return ICE_AQC_LS_DATA_SIZE_V2; 733 case ICE_MAC_E810: 734 default: 735 return ICE_AQC_LS_DATA_SIZE_V1; 736 } 737 } 738 739 /** 740 * ice_aq_get_link_info 741 * @pi: port information structure 742 * @ena_lse: enable/disable LinkStatusEvent reporting 743 * @link: pointer to link status structure - optional 744 * @cd: pointer to command details structure or NULL 745 * 746 * Get Link Status (0x607). Returns the link status of the adapter. 747 */ 748 int 749 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, 750 struct ice_link_status *link, struct ice_sq_cd *cd) 751 { 752 struct ice_aqc_get_link_status_data link_data = { 0 }; 753 struct ice_aqc_get_link_status *resp; 754 struct ice_link_status *li_old, *li; 755 enum ice_media_type *hw_media_type; 756 struct ice_fc_info *hw_fc_info; 757 bool tx_pause, rx_pause; 758 struct ice_aq_desc desc; 759 struct ice_hw *hw; 760 u16 cmd_flags; 761 int status; 762 763 if (!pi) 764 return -EINVAL; 765 hw = pi->hw; 766 li_old = &pi->phy.link_info_old; 767 hw_media_type = &pi->phy.media_type; 768 li = &pi->phy.link_info; 769 hw_fc_info = &pi->fc; 770 771 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); 772 cmd_flags = (ena_lse) ? 
ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; 773 resp = &desc.params.get_link_status; 774 resp->cmd_flags = cpu_to_le16(cmd_flags); 775 resp->lport_num = pi->lport; 776 777 status = ice_aq_send_cmd(hw, &desc, &link_data, 778 ice_get_link_status_datalen(hw), cd); 779 if (status) 780 return status; 781 782 /* save off old link status information */ 783 *li_old = *li; 784 785 /* update current link status information */ 786 li->link_speed = le16_to_cpu(link_data.link_speed); 787 li->phy_type_low = le64_to_cpu(link_data.phy_type_low); 788 li->phy_type_high = le64_to_cpu(link_data.phy_type_high); 789 *hw_media_type = ice_get_media_type(pi); 790 li->link_info = link_data.link_info; 791 li->link_cfg_err = link_data.link_cfg_err; 792 li->an_info = link_data.an_info; 793 li->ext_info = link_data.ext_info; 794 li->max_frame_size = le16_to_cpu(link_data.max_frame_size); 795 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; 796 li->topo_media_conflict = link_data.topo_media_conflict; 797 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | 798 ICE_AQ_CFG_PACING_TYPE_M); 799 800 /* update fc info */ 801 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); 802 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); 803 if (tx_pause && rx_pause) 804 hw_fc_info->current_mode = ICE_FC_FULL; 805 else if (tx_pause) 806 hw_fc_info->current_mode = ICE_FC_TX_PAUSE; 807 else if (rx_pause) 808 hw_fc_info->current_mode = ICE_FC_RX_PAUSE; 809 else 810 hw_fc_info->current_mode = ICE_FC_NONE; 811 812 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); 813 814 ice_debug(hw, ICE_DBG_LINK, "get link info\n"); 815 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed); 816 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 817 (unsigned long long)li->phy_type_low); 818 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 819 (unsigned long long)li->phy_type_high); 820 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type); 821 ice_debug(hw, 
ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info); 822 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err); 823 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info); 824 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info); 825 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info); 826 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena); 827 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n", 828 li->max_frame_size); 829 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing); 830 831 /* save link status information */ 832 if (link) 833 *link = *li; 834 835 /* flag cleared so calling functions don't call AQ again */ 836 pi->phy.get_link_info = false; 837 838 return 0; 839 } 840 841 /** 842 * ice_fill_tx_timer_and_fc_thresh 843 * @hw: pointer to the HW struct 844 * @cmd: pointer to MAC cfg structure 845 * 846 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command 847 * descriptor 848 */ 849 static void 850 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, 851 struct ice_aqc_set_mac_cfg *cmd) 852 { 853 u32 val, fc_thres_m; 854 855 /* We read back the transmit timer and FC threshold value of 856 * LFC. Thus, we will use index = 857 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. 
858 * 859 * Also, because we are operating on transmit timer and FC 860 * threshold of LFC, we don't turn on any bit in tx_tmr_priority 861 */ 862 #define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX 863 #define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR 864 865 if (hw->mac_type == ICE_MAC_E830) { 866 /* Retrieve the transmit timer */ 867 val = rd32(hw, E830_PRTMAC_CL01_PS_QNT); 868 cmd->tx_tmr_value = 869 le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M); 870 871 /* Retrieve the fc threshold */ 872 val = rd32(hw, E830_PRTMAC_CL01_QNT_THR); 873 fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M; 874 } else { 875 /* Retrieve the transmit timer */ 876 val = rd32(hw, 877 E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC)); 878 cmd->tx_tmr_value = 879 le16_encode_bits(val, 880 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M); 881 882 /* Retrieve the fc threshold */ 883 val = rd32(hw, 884 E800_REFRESH_TMR(E800_IDX_OF_LFC)); 885 fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M; 886 } 887 cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m); 888 } 889 890 /** 891 * ice_aq_set_mac_cfg 892 * @hw: pointer to the HW struct 893 * @max_frame_size: Maximum Frame Size to be supported 894 * @cd: pointer to command details structure or NULL 895 * 896 * Set MAC configuration (0x0603) 897 */ 898 int 899 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) 900 { 901 struct ice_aqc_set_mac_cfg *cmd; 902 struct ice_aq_desc desc; 903 904 cmd = &desc.params.set_mac_cfg; 905 906 if (max_frame_size == 0) 907 return -EINVAL; 908 909 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); 910 911 cmd->max_frame_size = cpu_to_le16(max_frame_size); 912 913 ice_fill_tx_timer_and_fc_thresh(hw, cmd); 914 915 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 916 } 917 918 /** 919 * ice_init_fltr_mgmt_struct - initializes filter management list and locks 920 * @hw: pointer to the HW struct 921 */ 922 static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) 923 { 
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	/* Initialize recipe count with default recipes read from NVM */
	sw->recp_cnt = ICE_SW_LKUP_LAST;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		/* undo the allocation above so a retry starts clean */
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 *
 * Frees every VSI list map entry, every filter rule (regular and advanced)
 * attached to each recipe, the recipe array itself, and finally the
 * switch_info structure. Counterpart of the init path above.
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;

		/* advanced rules carry a separately-allocated lookup array
		 * (lkups) that must be freed before the entry itself
		 */
		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	/* NOTE: an unrecognized bandwidth value leaves both granularity
	 * fields untouched (no default case)
	 */
	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 *
 * Performs a PF reset, brings up the control queues, reads NVM and device
 * capabilities, initializes the port/scheduler/filter-management state and
 * reads the MAC address. On any failure the already-completed steps are
 * unrolled via the err_unroll_* label chain (in reverse order of setup).
 *
 * Return: 0 on success, negative error code on failure.
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* FW logging setup failure is logged but deliberately non-fatal */
	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	/* port_info may survive a rebuild; allocate only when absent, then
	 * re-check so an allocation failure is caught either way
	 */
	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities; failure here is
	 * only a warning, initialization continues
	 */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	return 0;

	/* unwind in reverse order of the setup steps above */
err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw()
failing since ice_init_hw() will take care of unrolling 1192 * applicable initializations if it fails for any reason. 1193 */ 1194 void ice_deinit_hw(struct ice_hw *hw) 1195 { 1196 ice_free_fd_res_cntr(hw, hw->fd_ctr_base); 1197 ice_cleanup_fltr_mgmt_struct(hw); 1198 1199 ice_sched_cleanup_all(hw); 1200 ice_sched_clear_agg(hw); 1201 ice_free_seg(hw); 1202 ice_free_hw_tbls(hw); 1203 mutex_destroy(&hw->tnl_lock); 1204 1205 ice_fwlog_deinit(hw); 1206 ice_destroy_all_ctrlq(hw); 1207 1208 /* Clear VSI contexts if not already cleared */ 1209 ice_clear_all_vsi_ctx(hw); 1210 } 1211 1212 /** 1213 * ice_check_reset - Check to see if a global reset is complete 1214 * @hw: pointer to the hardware structure 1215 */ 1216 int ice_check_reset(struct ice_hw *hw) 1217 { 1218 u32 cnt, reg = 0, grst_timeout, uld_mask; 1219 1220 /* Poll for Device Active state in case a recent CORER, GLOBR, 1221 * or EMPR has occurred. The grst delay value is in 100ms units. 1222 * Add 1sec for outstanding AQ commands that can take a long time. 1223 */ 1224 grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M, 1225 rd32(hw, GLGEN_RSTCTL)) + 10; 1226 1227 for (cnt = 0; cnt < grst_timeout; cnt++) { 1228 mdelay(100); 1229 reg = rd32(hw, GLGEN_RSTAT); 1230 if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) 1231 break; 1232 } 1233 1234 if (cnt == grst_timeout) { 1235 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); 1236 return -EIO; 1237 } 1238 1239 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ 1240 GLNVM_ULD_PCIER_DONE_1_M |\ 1241 GLNVM_ULD_CORER_DONE_M |\ 1242 GLNVM_ULD_GLOBR_DONE_M |\ 1243 GLNVM_ULD_POR_DONE_M |\ 1244 GLNVM_ULD_POR_DONE_1_M |\ 1245 GLNVM_ULD_PCIER_DONE_2_M) 1246 1247 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ? 
1248 GLNVM_ULD_PE_DONE_M : 0); 1249 1250 /* Device is Active; check Global Reset processes are done */ 1251 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 1252 reg = rd32(hw, GLNVM_ULD) & uld_mask; 1253 if (reg == uld_mask) { 1254 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); 1255 break; 1256 } 1257 mdelay(10); 1258 } 1259 1260 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 1261 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", 1262 reg); 1263 return -EIO; 1264 } 1265 1266 return 0; 1267 } 1268 1269 /** 1270 * ice_pf_reset - Reset the PF 1271 * @hw: pointer to the hardware structure 1272 * 1273 * If a global reset has been triggered, this function checks 1274 * for its completion and then issues the PF reset 1275 */ 1276 static int ice_pf_reset(struct ice_hw *hw) 1277 { 1278 u32 cnt, reg; 1279 1280 /* If at function entry a global reset was already in progress, i.e. 1281 * state is not 'device active' or any of the reset done bits are not 1282 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the 1283 * global reset is done. 1284 */ 1285 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || 1286 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { 1287 /* poll on global reset currently in progress until done */ 1288 if (ice_check_reset(hw)) 1289 return -EIO; 1290 1291 return 0; 1292 } 1293 1294 /* Reset the PF */ 1295 reg = rd32(hw, PFGEN_CTRL); 1296 1297 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); 1298 1299 /* Wait for the PFR to complete. The wait time is the global config lock 1300 * timeout plus the PFR timeout which will account for a possible reset 1301 * that is occurring during a download package operation. 
1302 */ 1303 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + 1304 ICE_PF_RESET_WAIT_COUNT; cnt++) { 1305 reg = rd32(hw, PFGEN_CTRL); 1306 if (!(reg & PFGEN_CTRL_PFSWR_M)) 1307 break; 1308 1309 mdelay(1); 1310 } 1311 1312 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 1313 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); 1314 return -EIO; 1315 } 1316 1317 return 0; 1318 } 1319 1320 /** 1321 * ice_reset - Perform different types of reset 1322 * @hw: pointer to the hardware structure 1323 * @req: reset request 1324 * 1325 * This function triggers a reset as specified by the req parameter. 1326 * 1327 * Note: 1328 * If anything other than a PF reset is triggered, PXE mode is restored. 1329 * This has to be cleared using ice_clear_pxe_mode again, once the AQ 1330 * interface has been restored in the rebuild flow. 1331 */ 1332 int ice_reset(struct ice_hw *hw, enum ice_reset_req req) 1333 { 1334 u32 val = 0; 1335 1336 switch (req) { 1337 case ICE_RESET_PFR: 1338 return ice_pf_reset(hw); 1339 case ICE_RESET_CORER: 1340 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); 1341 val = GLGEN_RTRIG_CORER_M; 1342 break; 1343 case ICE_RESET_GLOBR: 1344 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); 1345 val = GLGEN_RTRIG_GLOBR_M; 1346 break; 1347 default: 1348 return -EINVAL; 1349 } 1350 1351 val |= rd32(hw, GLGEN_RTRIG); 1352 wr32(hw, GLGEN_RTRIG, val); 1353 ice_flush(hw); 1354 1355 /* wait for the FW to be ready */ 1356 return ice_check_reset(hw); 1357 } 1358 1359 /** 1360 * ice_copy_rxq_ctx_to_hw 1361 * @hw: pointer to the hardware structure 1362 * @ice_rxq_ctx: pointer to the rxq context 1363 * @rxq_index: the index of the Rx queue 1364 * 1365 * Copies rxq context from dense structure to HW register space 1366 */ 1367 static int 1368 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) 1369 { 1370 u8 i; 1371 1372 if (!ice_rxq_ctx) 1373 return -EINVAL; 1374 1375 if (rxq_index > QRX_CTRL_MAX_INDEX) 1376 return -EINVAL; 1377 1378 /* Copy 
	   each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context
 * Bit-field layout table consumed by ice_set_ctx(): each row maps one
 * ice_rlan_ctx struct member to its width (bits) and LSB position in the
 * dense HW context image. Values mirror the hardware layout and must not
 * be reordered or edited independently of the struct definition.
 */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	/* always enable descriptor prefetch (see function comment above) */
	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/* LAN Tx Queue Context
 * Same table format as ice_rlan_ctx_info above, for the Tx queue context.
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue
 * command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Thin wrapper: submits the descriptor on the sideband control queue via
 * the generic ice_sq_send_cmd() path.
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 * @flags: control queue descriptor flags
 *
 * Builds and sends a sideband register read/write message. A non-zero
 * in->opcode carries in->data to the device; a zero opcode is a read, in
 * which case the result is copied back into in->data from the completion.
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	/* addresses go out on the wire little-endian */
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(flags);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	/* on a successful read, the completion overlays msg; extract data */
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware
 * is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		/* keep a pristine copy: FW may write completion data into
		 * *desc, so each retry must start from the original
		 */
		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		/* retry only EBUSY failures of retryable opcodes */
		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	/* these opcodes may be sent while the global config lock is held */
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_set_tx_topo:
	case ice_aqc_opc_get_tx_topo:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		/* releasing the global config lock itself must not block */
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands.
 * On success the FW and API version fields are cached in @hw.
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
* ice_aq_send_driver_ver 1707 * @hw: pointer to the HW struct 1708 * @dv: driver's major, minor version 1709 * @cd: pointer to command details structure or NULL 1710 * 1711 * Send the driver version (0x0002) to the firmware 1712 */ 1713 int 1714 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, 1715 struct ice_sq_cd *cd) 1716 { 1717 struct ice_aqc_driver_ver *cmd; 1718 struct ice_aq_desc desc; 1719 u16 len; 1720 1721 cmd = &desc.params.driver_ver; 1722 1723 if (!dv) 1724 return -EINVAL; 1725 1726 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); 1727 1728 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1729 cmd->major_ver = dv->major_ver; 1730 cmd->minor_ver = dv->minor_ver; 1731 cmd->build_ver = dv->build_ver; 1732 cmd->subbuild_ver = dv->subbuild_ver; 1733 1734 len = 0; 1735 while (len < sizeof(dv->driver_string) && 1736 isascii(dv->driver_string[len]) && dv->driver_string[len]) 1737 len++; 1738 1739 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); 1740 } 1741 1742 /** 1743 * ice_aq_q_shutdown 1744 * @hw: pointer to the HW struct 1745 * @unloading: is the driver unloading itself 1746 * 1747 * Tell the Firmware that we're shutting down the AdminQ and whether 1748 * or not the driver is unloading as well (0x0003). 
1749 */ 1750 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1751 { 1752 struct ice_aqc_q_shutdown *cmd; 1753 struct ice_aq_desc desc; 1754 1755 cmd = &desc.params.q_shutdown; 1756 1757 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1758 1759 if (unloading) 1760 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1761 1762 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1763 } 1764 1765 /** 1766 * ice_aq_req_res 1767 * @hw: pointer to the HW struct 1768 * @res: resource ID 1769 * @access: access type 1770 * @sdp_number: resource number 1771 * @timeout: the maximum time in ms that the driver may hold the resource 1772 * @cd: pointer to command details structure or NULL 1773 * 1774 * Requests common resource using the admin queue commands (0x0008). 1775 * When attempting to acquire the Global Config Lock, the driver can 1776 * learn of three states: 1777 * 1) 0 - acquired lock, and can perform download package 1778 * 2) -EIO - did not get lock, driver should fail to load 1779 * 3) -EALREADY - did not get lock, but another driver has 1780 * successfully downloaded the package; the driver does 1781 * not have to download the package and can continue 1782 * loading 1783 * 1784 * Note that if the caller is in an acquire lock, perform action, release lock 1785 * phase of operation, it is possible that the FW may detect a timeout and issue 1786 * a CORER. In this case, the driver will receive a CORER interrupt and will 1787 * have to determine its cause. The calling thread that is handling this flow 1788 * will likely get an error propagated back to it indicating the Download 1789 * Package, Update Package or the Release Resource AQ commands timed out. 
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	/* *timeout is an out-parameter from here on; clear it first */
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 * If the first request is refused, it polls (at ICE_RES_POLLING_DELAY_MS
 * intervals) until the current owner's hold time expires or the lock is
 * obtained.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts;
	 * time_left is refreshed by each ice_aq_req_res() call with the
	 * remaining hold time reported by firmware
	 */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == -EALREADY)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != -EALREADY)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == -EALREADY) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
1942 */ 1943 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1944 { 1945 unsigned long timeout; 1946 int status; 1947 1948 /* there are some rare cases when trying to release the resource 1949 * results in an admin queue timeout, so handle them correctly 1950 */ 1951 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1952 do { 1953 status = ice_aq_release_res(hw, res, 0, NULL); 1954 if (status != -EIO) 1955 break; 1956 usleep_range(1000, 2000); 1957 } while (time_before(jiffies, timeout)); 1958 } 1959 1960 /** 1961 * ice_aq_alloc_free_res - command to allocate/free resources 1962 * @hw: pointer to the HW struct 1963 * @buf: Indirect buffer to hold data parameters and response 1964 * @buf_size: size of buffer for indirect commands 1965 * @opc: pass in the command opcode 1966 * 1967 * Helper function to allocate/free resources using the admin queue commands 1968 */ 1969 int ice_aq_alloc_free_res(struct ice_hw *hw, 1970 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1971 enum ice_adminq_opc opc) 1972 { 1973 struct ice_aqc_alloc_free_res_cmd *cmd; 1974 struct ice_aq_desc desc; 1975 1976 cmd = &desc.params.sw_res_ctrl; 1977 1978 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1979 return -EINVAL; 1980 1981 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1982 1983 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1984 1985 cmd->num_entries = cpu_to_le16(1); 1986 1987 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1988 } 1989 1990 /** 1991 * ice_alloc_hw_res - allocate resource 1992 * @hw: pointer to the HW struct 1993 * @type: type of resource 1994 * @num: number of resources to allocate 1995 * @btm: allocate from bottom 1996 * @res: pointer to array that will receive the resources 1997 */ 1998 int 1999 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2000 { 2001 struct ice_aqc_alloc_free_res_elem *buf; 2002 u16 buf_len; 2003 int status; 2004 2005 buf_len = struct_size(buf, elem, num); 2006 buf = kzalloc(buf_len, 
GFP_KERNEL); 2007 if (!buf) 2008 return -ENOMEM; 2009 2010 /* Prepare buffer to allocate resource. */ 2011 buf->num_elems = cpu_to_le16(num); 2012 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2013 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2014 if (btm) 2015 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2016 2017 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2018 if (status) 2019 goto ice_alloc_res_exit; 2020 2021 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2022 2023 ice_alloc_res_exit: 2024 kfree(buf); 2025 return status; 2026 } 2027 2028 /** 2029 * ice_free_hw_res - free allocated HW resource 2030 * @hw: pointer to the HW struct 2031 * @type: type of resource to free 2032 * @num: number of resources 2033 * @res: pointer to array that contains the resources to free 2034 */ 2035 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2036 { 2037 struct ice_aqc_alloc_free_res_elem *buf; 2038 u16 buf_len; 2039 int status; 2040 2041 buf_len = struct_size(buf, elem, num); 2042 buf = kzalloc(buf_len, GFP_KERNEL); 2043 if (!buf) 2044 return -ENOMEM; 2045 2046 /* Prepare buffer to free resource. */ 2047 buf->num_elems = cpu_to_le16(num); 2048 buf->res_type = cpu_to_le16(type); 2049 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2050 2051 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2052 if (status) 2053 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2054 2055 kfree(buf); 2056 return status; 2057 } 2058 2059 /** 2060 * ice_get_num_per_func - determine number of resources per PF 2061 * @hw: pointer to the HW structure 2062 * @max: value to be evenly split between each PF 2063 * 2064 * Determine the number of valid functions by going through the bitmap returned 2065 * from parsing capabilities and use this to calculate the number of resources 2066 * per PF based on the max value passed in. 
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M 0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	/* no active functions reported: avoid dividing by zero */
	if (!funcs)
		return 0;

	return max / funcs;
}

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure. This handler is shared by device (0x000B) and
 * function (0x000A) capability parsing.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	/* pending-update flags indicate a firmware component update that
	 * requires a reset to take effect
	 */
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: pcie_reset_avoidance = %d\n", prefix,
			  caps->pcie_reset_avoidance);
		break;
	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: reset_restrict_support = %d\n", prefix,
			  caps->reset_restrict_support);
		break;
	case ICE_AQC_CAPS_FW_LAG_SUPPORT:
		caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
		ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
			  prefix, caps->roce_lag);
		caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
		ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
			  prefix, caps->sriov_lag);
		break;
	case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
		caps->tx_sched_topo_comp_mode_en = (number == 1);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
2224 */ 2225 static void 2226 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2227 { 2228 /* This assumes device capabilities are always scanned before function 2229 * capabilities during the initialization flow. 2230 */ 2231 if (hw->dev_caps.num_funcs > 4) { 2232 /* Max 4 TCs per port */ 2233 caps->maxtc = 4; 2234 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2235 caps->maxtc); 2236 if (caps->rdma) { 2237 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2238 caps->rdma = 0; 2239 } 2240 2241 /* print message only when processing device capabilities 2242 * during initialization. 2243 */ 2244 if (caps == &hw->dev_caps.common_cap) 2245 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2246 } 2247 } 2248 2249 /** 2250 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2251 * @hw: pointer to the HW struct 2252 * @func_p: pointer to function capabilities structure 2253 * @cap: pointer to the capability element to parse 2254 * 2255 * Extract function capabilities for ICE_AQC_CAPS_VF. 2256 */ 2257 static void 2258 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2259 struct ice_aqc_list_caps_elem *cap) 2260 { 2261 u32 logical_id = le32_to_cpu(cap->logical_id); 2262 u32 number = le32_to_cpu(cap->number); 2263 2264 func_p->num_allocd_vfs = number; 2265 func_p->vf_base_id = logical_id; 2266 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2267 func_p->num_allocd_vfs); 2268 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2269 func_p->vf_base_id); 2270 } 2271 2272 /** 2273 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2274 * @hw: pointer to the HW struct 2275 * @func_p: pointer to function capabilities structure 2276 * @cap: pointer to the capability element to parse 2277 * 2278 * Extract function capabilities for ICE_AQC_CAPS_VSI. 
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	/* guaranteed VSI count is an even split of ICE_MAX_VSI across the
	 * active PFs, not the raw value reported by firmware
	 */
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  le32_to_cpu(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			 struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_func_info *info = &func_p->ts_func_info;
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
	func_p->common_cap.ieee_1588 = info->ena;

	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);

	/* on E825C use fixed clock defaults instead of the capability
	 * fields reported by firmware
	 */
	if (!ice_is_e825c(hw)) {
		info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
		info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
	} else {
		info->clk_freq = ICE_TIME_REF_FREQ_156_250;
		info->clk_src = ICE_CLK_SRC_TCXO;
	}

	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
	} else {
		/* Unknown clock frequency, so assume a (probably incorrect)
		 * default to avoid out-of-bounds look ups of frequency
		 * related information.
		 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
2356 */ 2357 static void 2358 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2359 { 2360 u32 reg_val, gsize, bsize; 2361 2362 reg_val = rd32(hw, GLQF_FD_SIZE); 2363 switch (hw->mac_type) { 2364 case ICE_MAC_E830: 2365 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2366 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2367 break; 2368 case ICE_MAC_E810: 2369 default: 2370 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2371 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2372 } 2373 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2374 func_p->fd_fltr_best_effort = bsize; 2375 2376 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2377 func_p->fd_fltr_guar); 2378 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2379 func_p->fd_fltr_best_effort); 2380 } 2381 2382 /** 2383 * ice_parse_func_caps - Parse function capabilities 2384 * @hw: pointer to the HW struct 2385 * @func_p: pointer to function capabilities structure 2386 * @buf: buffer containing the function capability records 2387 * @cap_count: the number of capabilities 2388 * 2389 * Helper function to parse function (0x000A) capabilities list. For 2390 * capabilities shared between device and function, this relies on 2391 * ice_parse_common_caps. 2392 * 2393 * Loop through the list of provided capabilities and extract the relevant 2394 * data into the function capabilities structured. 
2395 */ 2396 static void 2397 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2398 void *buf, u32 cap_count) 2399 { 2400 struct ice_aqc_list_caps_elem *cap_resp; 2401 u32 i; 2402 2403 cap_resp = buf; 2404 2405 memset(func_p, 0, sizeof(*func_p)); 2406 2407 for (i = 0; i < cap_count; i++) { 2408 u16 cap = le16_to_cpu(cap_resp[i].cap); 2409 bool found; 2410 2411 found = ice_parse_common_caps(hw, &func_p->common_cap, 2412 &cap_resp[i], "func caps"); 2413 2414 switch (cap) { 2415 case ICE_AQC_CAPS_VF: 2416 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2417 break; 2418 case ICE_AQC_CAPS_VSI: 2419 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2420 break; 2421 case ICE_AQC_CAPS_1588: 2422 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2423 break; 2424 case ICE_AQC_CAPS_FD: 2425 ice_parse_fdir_func_caps(hw, func_p); 2426 break; 2427 default: 2428 /* Don't list common capabilities as unknown */ 2429 if (!found) 2430 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2431 i, cap); 2432 break; 2433 } 2434 } 2435 2436 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2437 } 2438 2439 /** 2440 * ice_func_id_to_logical_id - map from function id to logical pf id 2441 * @active_function_bitmap: active function bitmap 2442 * @pf_id: function number of device 2443 * 2444 * Return: logical PF ID. 2445 */ 2446 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) 2447 { 2448 u8 logical_id = 0; 2449 u8 i; 2450 2451 for (i = 0; i < pf_id; i++) 2452 if (active_function_bitmap & BIT(i)) 2453 logical_id++; 2454 2455 return logical_id; 2456 } 2457 2458 /** 2459 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2460 * @hw: pointer to the HW struct 2461 * @dev_p: pointer to device capabilities structure 2462 * @cap: capability element to parse 2463 * 2464 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 
2465 */ 2466 static void 2467 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2468 struct ice_aqc_list_caps_elem *cap) 2469 { 2470 u32 number = le32_to_cpu(cap->number); 2471 2472 dev_p->num_funcs = hweight32(number); 2473 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2474 dev_p->num_funcs); 2475 2476 hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); 2477 } 2478 2479 /** 2480 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2481 * @hw: pointer to the HW struct 2482 * @dev_p: pointer to device capabilities structure 2483 * @cap: capability element to parse 2484 * 2485 * Parse ICE_AQC_CAPS_VF for device capabilities. 2486 */ 2487 static void 2488 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2489 struct ice_aqc_list_caps_elem *cap) 2490 { 2491 u32 number = le32_to_cpu(cap->number); 2492 2493 dev_p->num_vfs_exposed = number; 2494 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2495 dev_p->num_vfs_exposed); 2496 } 2497 2498 /** 2499 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2500 * @hw: pointer to the HW struct 2501 * @dev_p: pointer to device capabilities structure 2502 * @cap: capability element to parse 2503 * 2504 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2505 */ 2506 static void 2507 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2508 struct ice_aqc_list_caps_elem *cap) 2509 { 2510 u32 number = le32_to_cpu(cap->number); 2511 2512 dev_p->num_vsi_allocd_to_host = number; 2513 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2514 dev_p->num_vsi_allocd_to_host); 2515 } 2516 2517 /** 2518 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2519 * @hw: pointer to the HW struct 2520 * @dev_p: pointer to device capabilities structure 2521 * @cap: capability element to parse 2522 * 2523 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	/* timer 0: owner field is taken unshifted (low bits of number) */
	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	/* low-latency Tx timestamp read support flags */
	info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
	info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
		  info->ts_ll_read);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
		  info->ts_ll_int_read);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
 * enabled sensors.
 */
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			     struct ice_aqc_list_caps_elem *cap)
{
	dev_p->supported_sensors = le32_to_cpu(cap->number);

	ice_debug(hw, ICE_DBG_INIT,
		  "dev caps: supported sensors (bitmap) = 0x%x\n",
		  dev_p->supported_sensors);
}

/**
 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2621 */ 2622 static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw, 2623 struct ice_hw_dev_caps *dev_p, 2624 struct ice_aqc_list_caps_elem *cap) 2625 { 2626 dev_p->nac_topo.mode = le32_to_cpu(cap->number); 2627 dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; 2628 2629 dev_info(ice_hw_to_dev(hw), 2630 "PF is configured in %s mode with IP instance ID %d\n", 2631 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ? 2632 "primary" : "secondary", dev_p->nac_topo.id); 2633 2634 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", 2635 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); 2636 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", 2637 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); 2638 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", 2639 dev_p->nac_topo.id); 2640 } 2641 2642 /** 2643 * ice_parse_dev_caps - Parse device capabilities 2644 * @hw: pointer to the HW struct 2645 * @dev_p: pointer to device capabilities structure 2646 * @buf: buffer containing the device capability records 2647 * @cap_count: the number of capabilities 2648 * 2649 * Helper device to parse device (0x000B) capabilities list. For 2650 * capabilities shared between device and function, this relies on 2651 * ice_parse_common_caps. 2652 * 2653 * Loop through the list of provided capabilities and extract the relevant 2654 * data into the device capabilities structured. 
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	/* start from a clean capabilities structure */
	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_SENSOR_READING:
			ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_NAC_TOPOLOGY:
			ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}

/**
 * ice_is_pf_c827 - check if pf contains c827 phy
 * @hw: pointer to the hw struct
 */
bool ice_is_pf_c827(struct ice_hw *hw)
{
	struct ice_aqc_get_link_topo cmd = {};
	u8 node_part_number;
	u16 node_handle;
	int status;

	if (hw->mac_type != ICE_MAC_E810)
		return false;

	/* non-QSFP E810 devices are treated as always having a C827;
	 * presumably only the QSFP variant needs a netlist lookup
	 */
	if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
		return true;

	cmd.addr.topo_params.node_type_ctx =
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
	cmd.addr.topo_params.index = 0;

	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
					 &node_handle);

	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
		return false;

	if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
		return true;

	return false;
}

/**
 * ice_is_phy_rclk_in_netlist
 * @hw: pointer to the hw struct
 *
 * Check if the PHY Recovered Clock device is present in the netlist
 */
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
	/* a zero return from ice_find_netlist_node means the node exists */
	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
	    ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
		return false;

	return true;
}

/**
 * ice_is_clock_mux_in_netlist
 * @hw: pointer to the hw struct
 *
 * Check if the Clock Multiplexer device is present in the netlist
 */
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
				  NULL))
		return false;

	return true;
}

/**
 * ice_is_cgu_in_netlist - check for CGU presence
 * @hw: pointer to the hw struct
 *
 * Check if the Clock Generation Unit (CGU) device is present in the netlist.
 * Save the CGU part number in the hw structure for later use.
2781 * Return: 2782 * * true - cgu is present 2783 * * false - cgu is not present 2784 */ 2785 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2786 { 2787 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2788 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2789 NULL)) { 2790 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2791 return true; 2792 } else if (!ice_find_netlist_node(hw, 2793 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2794 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2795 NULL)) { 2796 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2797 return true; 2798 } 2799 2800 return false; 2801 } 2802 2803 /** 2804 * ice_is_gps_in_netlist 2805 * @hw: pointer to the hw struct 2806 * 2807 * Check if the GPS generic device is present in the netlist 2808 */ 2809 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2810 { 2811 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2812 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2813 return false; 2814 2815 return true; 2816 } 2817 2818 /** 2819 * ice_aq_list_caps - query function/device capabilities 2820 * @hw: pointer to the HW struct 2821 * @buf: a buffer to hold the capabilities 2822 * @buf_size: size of the buffer 2823 * @cap_count: if not NULL, set to the number of capabilities reported 2824 * @opc: capabilities type to discover, device or function 2825 * @cd: pointer to command details structure or NULL 2826 * 2827 * Get the function (0x000A) or device (0x000B) capabilities description from 2828 * firmware and store it in the buffer. 2829 * 2830 * If the cap_count pointer is not NULL, then it is set to the number of 2831 * capabilities firmware will report. Note that if the buffer size is too 2832 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2833 * cap_count will still be updated in this case. 
 * It is recommended that the
 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
 * firmware could return) to avoid this.
 */
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	/* report the capability count even if the command failed */
	if (cap_count)
		*cap_count = le32_to_cpu(cmd->count);

	return status;
}

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */
int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
 */
static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 *
 * Discover device capabilities first, then function capabilities;
 * ice_recalc_port_limited_caps relies on this ordering.
 */
int ice_get_caps(struct ice_hw *hw)
{
	int status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
3020 */ 3021 int 3022 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3023 struct ice_sq_cd *cd) 3024 { 3025 struct ice_aqc_manage_mac_write *cmd; 3026 struct ice_aq_desc desc; 3027 3028 cmd = &desc.params.mac_write; 3029 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3030 3031 cmd->flags = flags; 3032 ether_addr_copy(cmd->mac_addr, mac_addr); 3033 3034 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3035 } 3036 3037 /** 3038 * ice_aq_clear_pxe_mode 3039 * @hw: pointer to the HW struct 3040 * 3041 * Tell the firmware that the driver is taking over from PXE (0x0110). 3042 */ 3043 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3044 { 3045 struct ice_aq_desc desc; 3046 3047 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3048 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3049 3050 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3051 } 3052 3053 /** 3054 * ice_clear_pxe_mode - clear pxe operations mode 3055 * @hw: pointer to the HW struct 3056 * 3057 * Make sure all PXE mode settings are cleared, including things 3058 * like descriptor fetch/write-back mode. 3059 */ 3060 void ice_clear_pxe_mode(struct ice_hw *hw) 3061 { 3062 if (ice_check_sq_alive(hw, &hw->adminq)) 3063 ice_aq_clear_pxe_mode(hw); 3064 } 3065 3066 /** 3067 * ice_aq_set_port_params - set physical port parameters. 
3068 * @pi: pointer to the port info struct 3069 * @double_vlan: if set double VLAN is enabled 3070 * @cd: pointer to command details structure or NULL 3071 * 3072 * Set Physical port parameters (0x0203) 3073 */ 3074 int 3075 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3076 struct ice_sq_cd *cd) 3077 3078 { 3079 struct ice_aqc_set_port_params *cmd; 3080 struct ice_hw *hw = pi->hw; 3081 struct ice_aq_desc desc; 3082 u16 cmd_flags = 0; 3083 3084 cmd = &desc.params.set_port_params; 3085 3086 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3087 if (double_vlan) 3088 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3089 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3090 3091 cmd->local_fwd_mode = pi->local_fwd_mode | 3092 ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID; 3093 3094 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3095 } 3096 3097 /** 3098 * ice_is_100m_speed_supported 3099 * @hw: pointer to the HW struct 3100 * 3101 * returns true if 100M speeds are supported by the device, 3102 * false otherwise. 3103 */ 3104 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3105 { 3106 switch (hw->device_id) { 3107 case ICE_DEV_ID_E822C_SGMII: 3108 case ICE_DEV_ID_E822L_SGMII: 3109 case ICE_DEV_ID_E823L_1GBE: 3110 case ICE_DEV_ID_E823C_SGMII: 3111 return true; 3112 default: 3113 return false; 3114 } 3115 } 3116 3117 /** 3118 * ice_get_link_speed_based_on_phy_type - returns link speed 3119 * @phy_type_low: lower part of phy_type 3120 * @phy_type_high: higher part of phy_type 3121 * 3122 * This helper function will convert an entry in PHY type structure 3123 * [phy_type_low, phy_type_high] to its corresponding link speed. 3124 * Note: In the structure of [phy_type_low, phy_type_high], there should 3125 * be one bit set, as this function will convert one PHY type to its 3126 * speed. 
 *
 * Return:
 * * PHY speed for recognized PHY type
 * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
 * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
 */
u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	/* The switch matches the full 64-bit value, so a multi-bit input
	 * falls through to the default (UNKNOWN) case by design.
	 */
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_SR4:
	case ICE_PHY_TYPE_HIGH_200G_FR4:
	case ICE_PHY_TYPE_HIGH_200G_LR4:
	case ICE_PHY_TYPE_HIGH_200G_DR4:
	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_200G_AUI4:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* exactly one of low/high may be known; anything else is UNKNOWN */
	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
 * link_speeds_bitmap include multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure will
 * present a certain link speed. This helper function will turn on bits
 * in [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
3276 */ 3277 void 3278 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3279 u16 link_speeds_bitmap) 3280 { 3281 u64 pt_high; 3282 u64 pt_low; 3283 int index; 3284 u16 speed; 3285 3286 /* We first check with low part of phy_type */ 3287 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3288 pt_low = BIT_ULL(index); 3289 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3290 3291 if (link_speeds_bitmap & speed) 3292 *phy_type_low |= BIT_ULL(index); 3293 } 3294 3295 /* We then check with high part of phy_type */ 3296 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3297 pt_high = BIT_ULL(index); 3298 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3299 3300 if (link_speeds_bitmap & speed) 3301 *phy_type_high |= BIT_ULL(index); 3302 } 3303 } 3304 3305 /** 3306 * ice_aq_set_phy_cfg 3307 * @hw: pointer to the HW struct 3308 * @pi: port info structure of the interested logical port 3309 * @cfg: structure with PHY configuration data to be set 3310 * @cd: pointer to command details structure or NULL 3311 * 3312 * Set the various PHY configuration parameters supported on the Port. 3313 * One or more of the Set PHY config parameters may be ignored in an MFP 3314 * mode as the PF may not have the privilege to set some of the PHY Config 3315 * parameters. This status will be indicated by the command response (0x0601). 3316 */ 3317 int 3318 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3319 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3320 { 3321 struct ice_aq_desc desc; 3322 int status; 3323 3324 if (!cfg) 3325 return -EINVAL; 3326 3327 /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ 3328 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3329 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3330 cfg->caps); 3331 3332 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3333 } 3334 3335 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3336 desc.params.set_phy.lport_num = pi->lport; 3337 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3338 3339 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3340 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3341 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3342 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3343 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3344 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3345 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3346 cfg->low_power_ctrl_an); 3347 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3348 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3349 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3350 cfg->link_fec_opt); 3351 3352 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3353 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3354 status = 0; 3355 3356 if (!status) 3357 pi->phy.curr_user_phy_cfg = *cfg; 3358 3359 return status; 3360 } 3361 3362 /** 3363 * ice_update_link_info - update status of the HW network link 3364 * @pi: port info structure of the interested logical port 3365 */ 3366 int ice_update_link_info(struct ice_port_info *pi) 3367 { 3368 struct ice_link_status *li; 3369 int status; 3370 3371 if (!pi) 3372 return -EINVAL; 3373 3374 li = &pi->phy.link_info; 3375 3376 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3377 if (status) 3378 return status; 3379 3380 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3381 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3382 3383 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3384 if (!pcaps) 3385 return -ENOMEM; 3386 3387 status = 
ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3388 pcaps, NULL); 3389 } 3390 3391 return status; 3392 } 3393 3394 /** 3395 * ice_aq_get_phy_equalization - function to read serdes equaliser 3396 * value from firmware using admin queue command. 3397 * @hw: pointer to the HW struct 3398 * @data_in: represents the serdes equalization parameter requested 3399 * @op_code: represents the serdes number and flag to represent tx or rx 3400 * @serdes_num: represents the serdes number 3401 * @output: pointer to the caller-supplied buffer to return serdes equaliser 3402 * 3403 * Return: non-zero status on error and 0 on success. 3404 */ 3405 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, 3406 u8 serdes_num, int *output) 3407 { 3408 struct ice_aqc_dnl_call_command *cmd; 3409 struct ice_aqc_dnl_call buf = {}; 3410 struct ice_aq_desc desc; 3411 int err; 3412 3413 buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); 3414 buf.sto.txrx_equa_reqs.op_code_serdes_sel = 3415 cpu_to_le16(op_code | (serdes_num & 0xF)); 3416 cmd = &desc.params.dnl_call; 3417 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); 3418 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | 3419 ICE_AQ_FLAG_RD | 3420 ICE_AQ_FLAG_SI); 3421 desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); 3422 cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); 3423 3424 err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), 3425 NULL); 3426 *output = err ? 
0 : buf.sto.txrx_equa_resp.val; 3427 3428 return err; 3429 } 3430 3431 #define FEC_REG_PORT(port) { \ 3432 FEC_CORR_LOW_REG_PORT##port, \ 3433 FEC_CORR_HIGH_REG_PORT##port, \ 3434 FEC_UNCORR_LOW_REG_PORT##port, \ 3435 FEC_UNCORR_HIGH_REG_PORT##port, \ 3436 } 3437 3438 static const u32 fec_reg[][ICE_FEC_MAX] = { 3439 FEC_REG_PORT(0), 3440 FEC_REG_PORT(1), 3441 FEC_REG_PORT(2), 3442 FEC_REG_PORT(3) 3443 }; 3444 3445 /** 3446 * ice_aq_get_fec_stats - reads fec stats from phy 3447 * @hw: pointer to the HW struct 3448 * @pcs_quad: represents pcsquad of user input serdes 3449 * @pcs_port: represents the pcs port number part of above pcs quad 3450 * @fec_type: represents FEC stats type 3451 * @output: pointer to the caller-supplied buffer to return requested fec stats 3452 * 3453 * Return: non-zero status on error and 0 on success. 3454 */ 3455 int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, 3456 enum ice_fec_stats_types fec_type, u32 *output) 3457 { 3458 u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); 3459 struct ice_sbq_msg_input msg = {}; 3460 u32 receiver_id, reg_offset; 3461 int err; 3462 3463 if (pcs_port > 3) 3464 return -EINVAL; 3465 3466 reg_offset = fec_reg[pcs_port][fec_type]; 3467 3468 if (pcs_quad == 0) 3469 receiver_id = FEC_RECEIVER_ID_PCS0; 3470 else if (pcs_quad == 1) 3471 receiver_id = FEC_RECEIVER_ID_PCS1; 3472 else 3473 return -EINVAL; 3474 3475 msg.msg_addr_low = lower_16_bits(reg_offset); 3476 msg.msg_addr_high = receiver_id; 3477 msg.opcode = ice_sbq_msg_rd; 3478 msg.dest_dev = rmn_0; 3479 3480 err = ice_sbq_rw_reg(hw, &msg, flag); 3481 if (err) 3482 return err; 3483 3484 *output = msg.data; 3485 return 0; 3486 } 3487 3488 /** 3489 * ice_cache_phy_user_req 3490 * @pi: port information structure 3491 * @cache_data: PHY logging data 3492 * @cache_mode: PHY logging mode 3493 * 3494 * Log the user request on (FC, FEC, SPEED) for later use. 
3495 */ 3496 static void 3497 ice_cache_phy_user_req(struct ice_port_info *pi, 3498 struct ice_phy_cache_mode_data cache_data, 3499 enum ice_phy_cache_mode cache_mode) 3500 { 3501 if (!pi) 3502 return; 3503 3504 switch (cache_mode) { 3505 case ICE_FC_MODE: 3506 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3507 break; 3508 case ICE_SPEED_MODE: 3509 pi->phy.curr_user_speed_req = 3510 cache_data.data.curr_user_speed_req; 3511 break; 3512 case ICE_FEC_MODE: 3513 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3514 break; 3515 default: 3516 break; 3517 } 3518 } 3519 3520 /** 3521 * ice_caps_to_fc_mode 3522 * @caps: PHY capabilities 3523 * 3524 * Convert PHY FC capabilities to ice FC mode 3525 */ 3526 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3527 { 3528 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3529 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3530 return ICE_FC_FULL; 3531 3532 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3533 return ICE_FC_TX_PAUSE; 3534 3535 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3536 return ICE_FC_RX_PAUSE; 3537 3538 return ICE_FC_NONE; 3539 } 3540 3541 /** 3542 * ice_caps_to_fec_mode 3543 * @caps: PHY capabilities 3544 * @fec_options: Link FEC options 3545 * 3546 * Convert PHY FEC capabilities to ice FEC mode 3547 */ 3548 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3549 { 3550 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3551 return ICE_FEC_AUTO; 3552 3553 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3554 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3555 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3556 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3557 return ICE_FEC_BASER; 3558 3559 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3560 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3561 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3562 return ICE_FEC_RS; 3563 3564 return ICE_FEC_NONE; 3565 } 3566 3567 /** 3568 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3569 * @pi: port information structure 3570 * @cfg: PHY configuration data to set FC 
mode 3571 * @req_mode: FC mode to configure 3572 */ 3573 int 3574 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3575 enum ice_fc_mode req_mode) 3576 { 3577 struct ice_phy_cache_mode_data cache_data; 3578 u8 pause_mask = 0x0; 3579 3580 if (!pi || !cfg) 3581 return -EINVAL; 3582 3583 switch (req_mode) { 3584 case ICE_FC_FULL: 3585 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3586 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3587 break; 3588 case ICE_FC_RX_PAUSE: 3589 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3590 break; 3591 case ICE_FC_TX_PAUSE: 3592 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3593 break; 3594 default: 3595 break; 3596 } 3597 3598 /* clear the old pause settings */ 3599 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3600 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3601 3602 /* set the new capabilities */ 3603 cfg->caps |= pause_mask; 3604 3605 /* Cache user FC request */ 3606 cache_data.data.curr_user_fc_req = req_mode; 3607 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3608 3609 return 0; 3610 } 3611 3612 /** 3613 * ice_set_fc 3614 * @pi: port information structure 3615 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3616 * @ena_auto_link_update: enable automatic link update 3617 * 3618 * Set the requested flow control mode. 
3619 */ 3620 int 3621 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3622 { 3623 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3624 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3625 struct ice_hw *hw; 3626 int status; 3627 3628 if (!pi || !aq_failures) 3629 return -EINVAL; 3630 3631 *aq_failures = 0; 3632 hw = pi->hw; 3633 3634 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3635 if (!pcaps) 3636 return -ENOMEM; 3637 3638 /* Get the current PHY config */ 3639 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3640 pcaps, NULL); 3641 if (status) { 3642 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3643 goto out; 3644 } 3645 3646 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3647 3648 /* Configure the set PHY data */ 3649 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3650 if (status) 3651 goto out; 3652 3653 /* If the capabilities have changed, then set the new config */ 3654 if (cfg.caps != pcaps->caps) { 3655 int retry_count, retry_max = 10; 3656 3657 /* Auto restart link so settings take effect */ 3658 if (ena_auto_link_update) 3659 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3660 3661 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3662 if (status) { 3663 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3664 goto out; 3665 } 3666 3667 /* Update the link info 3668 * It sometimes takes a really long time for link to 3669 * come back from the atomic reset. Thus, we wait a 3670 * little bit. 
3671 */ 3672 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3673 status = ice_update_link_info(pi); 3674 3675 if (!status) 3676 break; 3677 3678 mdelay(100); 3679 } 3680 3681 if (status) 3682 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3683 } 3684 3685 out: 3686 return status; 3687 } 3688 3689 /** 3690 * ice_phy_caps_equals_cfg 3691 * @phy_caps: PHY capabilities 3692 * @phy_cfg: PHY configuration 3693 * 3694 * Helper function to determine if PHY capabilities matches PHY 3695 * configuration 3696 */ 3697 bool 3698 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3699 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3700 { 3701 u8 caps_mask, cfg_mask; 3702 3703 if (!phy_caps || !phy_cfg) 3704 return false; 3705 3706 /* These bits are not common between capabilities and configuration. 3707 * Do not use them to determine equality. 3708 */ 3709 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3710 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3711 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3712 3713 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3714 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3715 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3716 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3717 phy_caps->eee_cap != phy_cfg->eee_cap || 3718 phy_caps->eeer_value != phy_cfg->eeer_value || 3719 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3720 return false; 3721 3722 return true; 3723 } 3724 3725 /** 3726 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3727 * @pi: port information structure 3728 * @caps: PHY ability structure to copy date from 3729 * @cfg: PHY configuration structure to copy data to 3730 * 3731 * Helper function to copy AQC PHY get ability data to PHY set configuration 3732 * data structure 3733 */ 3734 void 3735 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3736 struct ice_aqc_get_phy_caps_data *caps, 3737 struct 
ice_aqc_set_phy_cfg_data *cfg) 3738 { 3739 if (!pi || !caps || !cfg) 3740 return; 3741 3742 memset(cfg, 0, sizeof(*cfg)); 3743 cfg->phy_type_low = caps->phy_type_low; 3744 cfg->phy_type_high = caps->phy_type_high; 3745 cfg->caps = caps->caps; 3746 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3747 cfg->eee_cap = caps->eee_cap; 3748 cfg->eeer_value = caps->eeer_value; 3749 cfg->link_fec_opt = caps->link_fec_options; 3750 cfg->module_compliance_enforcement = 3751 caps->module_compliance_enforcement; 3752 } 3753 3754 /** 3755 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3756 * @pi: port information structure 3757 * @cfg: PHY configuration data to set FEC mode 3758 * @fec: FEC mode to configure 3759 */ 3760 int 3761 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3762 enum ice_fec_mode fec) 3763 { 3764 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3765 struct ice_hw *hw; 3766 int status; 3767 3768 if (!pi || !cfg) 3769 return -EINVAL; 3770 3771 hw = pi->hw; 3772 3773 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3774 if (!pcaps) 3775 return -ENOMEM; 3776 3777 status = ice_aq_get_phy_caps(pi, false, 3778 (ice_fw_supports_report_dflt_cfg(hw) ? 3779 ICE_AQC_REPORT_DFLT_CFG : 3780 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3781 if (status) 3782 goto out; 3783 3784 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3785 cfg->link_fec_opt = pcaps->link_fec_options; 3786 3787 switch (fec) { 3788 case ICE_FEC_BASER: 3789 /* Clear RS bits, and AND BASE-R ability 3790 * bits and OR request bits. 3791 */ 3792 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3793 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3794 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3795 ICE_AQC_PHY_FEC_25G_KR_REQ; 3796 break; 3797 case ICE_FEC_RS: 3798 /* Clear BASE-R bits, and AND RS ability 3799 * bits and OR request bits. 
3800 */ 3801 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3802 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3803 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3804 break; 3805 case ICE_FEC_NONE: 3806 /* Clear all FEC option bits. */ 3807 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3808 break; 3809 case ICE_FEC_AUTO: 3810 /* AND auto FEC bit, and all caps bits. */ 3811 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3812 cfg->link_fec_opt |= pcaps->link_fec_options; 3813 break; 3814 default: 3815 status = -EINVAL; 3816 break; 3817 } 3818 3819 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3820 !ice_fw_supports_report_dflt_cfg(hw)) { 3821 struct ice_link_default_override_tlv tlv = { 0 }; 3822 3823 status = ice_get_link_default_override(&tlv, pi); 3824 if (status) 3825 goto out; 3826 3827 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3828 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3829 cfg->link_fec_opt = tlv.fec_options; 3830 } 3831 3832 out: 3833 return status; 3834 } 3835 3836 /** 3837 * ice_get_link_status - get status of the HW network link 3838 * @pi: port information structure 3839 * @link_up: pointer to bool (true/false = linkup/linkdown) 3840 * 3841 * Variable link_up is true if link is up, false if link is down. 3842 * The variable link_up is invalid if status is non zero. 
As a 3843 * result of this call, link status reporting becomes enabled 3844 */ 3845 int ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3846 { 3847 struct ice_phy_info *phy_info; 3848 int status = 0; 3849 3850 if (!pi || !link_up) 3851 return -EINVAL; 3852 3853 phy_info = &pi->phy; 3854 3855 if (phy_info->get_link_info) { 3856 status = ice_update_link_info(pi); 3857 3858 if (status) 3859 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3860 status); 3861 } 3862 3863 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3864 3865 return status; 3866 } 3867 3868 /** 3869 * ice_aq_set_link_restart_an 3870 * @pi: pointer to the port information structure 3871 * @ena_link: if true: enable link, if false: disable link 3872 * @cd: pointer to command details structure or NULL 3873 * 3874 * Sets up the link and restarts the Auto-Negotiation over the link. 3875 */ 3876 int 3877 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3878 struct ice_sq_cd *cd) 3879 { 3880 struct ice_aqc_restart_an *cmd; 3881 struct ice_aq_desc desc; 3882 3883 cmd = &desc.params.restart_an; 3884 3885 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3886 3887 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3888 cmd->lport_num = pi->lport; 3889 if (ena_link) 3890 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3891 else 3892 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3893 3894 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3895 } 3896 3897 /** 3898 * ice_aq_set_event_mask 3899 * @hw: pointer to the HW struct 3900 * @port_num: port number of the physical function 3901 * @mask: event mask to be set 3902 * @cd: pointer to command details structure or NULL 3903 * 3904 * Set event mask (0x0613) 3905 */ 3906 int 3907 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3908 struct ice_sq_cd *cd) 3909 { 3910 struct ice_aqc_set_event_mask *cmd; 3911 struct ice_aq_desc desc; 3912 3913 cmd = 
&desc.params.set_event_mask; 3914 3915 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3916 3917 cmd->lport_num = port_num; 3918 3919 cmd->event_mask = cpu_to_le16(mask); 3920 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3921 } 3922 3923 /** 3924 * ice_aq_set_mac_loopback 3925 * @hw: pointer to the HW struct 3926 * @ena_lpbk: Enable or Disable loopback 3927 * @cd: pointer to command details structure or NULL 3928 * 3929 * Enable/disable loopback on a given port 3930 */ 3931 int 3932 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3933 { 3934 struct ice_aqc_set_mac_lb *cmd; 3935 struct ice_aq_desc desc; 3936 3937 cmd = &desc.params.set_mac_lb; 3938 3939 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3940 if (ena_lpbk) 3941 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3942 3943 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3944 } 3945 3946 /** 3947 * ice_aq_set_port_id_led 3948 * @pi: pointer to the port information 3949 * @is_orig_mode: is this LED set to original mode (by the net-list) 3950 * @cd: pointer to command details structure or NULL 3951 * 3952 * Set LED value for the given port (0x06e9) 3953 */ 3954 int 3955 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3956 struct ice_sq_cd *cd) 3957 { 3958 struct ice_aqc_set_port_id_led *cmd; 3959 struct ice_hw *hw = pi->hw; 3960 struct ice_aq_desc desc; 3961 3962 cmd = &desc.params.set_port_id_led; 3963 3964 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3965 3966 if (is_orig_mode) 3967 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3968 else 3969 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3970 3971 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3972 } 3973 3974 /** 3975 * ice_aq_get_port_options 3976 * @hw: pointer to the HW struct 3977 * @options: buffer for the resultant port options 3978 * @option_count: input - size of the buffer in port options structures, 3979 * output - number of returned port options 3980 
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
 *               when PF owns more than 1 port it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 *
 * Return: 0 on success, -EINVAL for an undersized buffer, -EIO when the
 * FW response indexes past the reported option count, or the AQ status.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		/* an active index past the option count is a FW error */
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		/* a pending index past the option count is a FW error */
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}

/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
 *               when PF owns more than 1 port it must be true
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
4066 */ 4067 int 4068 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 4069 u8 new_option) 4070 { 4071 struct ice_aqc_set_port_option *cmd; 4072 struct ice_aq_desc desc; 4073 4074 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 4075 return -EINVAL; 4076 4077 cmd = &desc.params.set_port_option; 4078 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 4079 4080 if (lport_valid) 4081 cmd->lport_num = lport; 4082 4083 cmd->lport_num_valid = lport_valid; 4084 cmd->selected_port_option = new_option; 4085 4086 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 4087 } 4088 4089 /** 4090 * ice_aq_sff_eeprom 4091 * @hw: pointer to the HW struct 4092 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 4093 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 4094 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 4095 * @page: QSFP page 4096 * @set_page: set or ignore the page 4097 * @data: pointer to data buffer to be read/written to the I2C device. 4098 * @length: 1-16 for read, 1 for write. 4099 * @write: 0 read, 1 for write. 
4100 * @cd: pointer to command details structure or NULL 4101 * 4102 * Read/Write SFF EEPROM (0x06EE) 4103 */ 4104 int 4105 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 4106 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 4107 bool write, struct ice_sq_cd *cd) 4108 { 4109 struct ice_aqc_sff_eeprom *cmd; 4110 struct ice_aq_desc desc; 4111 u16 i2c_bus_addr; 4112 int status; 4113 4114 if (!data || (mem_addr & 0xff00)) 4115 return -EINVAL; 4116 4117 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 4118 cmd = &desc.params.read_write_sff_param; 4119 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 4120 cmd->lport_num = (u8)(lport & 0xff); 4121 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 4122 i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) | 4123 FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page); 4124 if (write) 4125 i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE; 4126 cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr); 4127 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 4128 cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M); 4129 4130 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 4131 return status; 4132 } 4133 4134 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) 4135 { 4136 switch (type) { 4137 case ICE_LUT_VSI: 4138 return ICE_LUT_VSI_SIZE; 4139 case ICE_LUT_GLOBAL: 4140 return ICE_LUT_GLOBAL_SIZE; 4141 case ICE_LUT_PF: 4142 return ICE_LUT_PF_SIZE; 4143 } 4144 WARN_ONCE(1, "incorrect type passed"); 4145 return ICE_LUT_VSI_SIZE; 4146 } 4147 4148 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) 4149 { 4150 switch (size) { 4151 case ICE_LUT_VSI_SIZE: 4152 return ICE_AQC_LUT_SIZE_SMALL; 4153 case ICE_LUT_GLOBAL_SIZE: 4154 return ICE_AQC_LUT_SIZE_512; 4155 case ICE_LUT_PF_SIZE: 4156 return ICE_AQC_LUT_SIZE_2K; 4157 } 4158 WARN_ONCE(1, "incorrect size passed"); 4159 return 0; 4160 } 4161 4162 /** 4163 * __ice_aq_get_set_rss_lut 4164 * @hw: pointer to the 
hardware structure 4165 * @params: RSS LUT parameters 4166 * @set: set true to set the table, false to get the table 4167 * 4168 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4169 */ 4170 static int 4171 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 4172 struct ice_aq_get_set_rss_lut_params *params, bool set) 4173 { 4174 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 4175 enum ice_lut_type lut_type = params->lut_type; 4176 struct ice_aqc_get_set_rss_lut *desc_params; 4177 enum ice_aqc_lut_flags flags; 4178 enum ice_lut_size lut_size; 4179 struct ice_aq_desc desc; 4180 u8 *lut = params->lut; 4181 4182 4183 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4184 return -EINVAL; 4185 4186 lut_size = ice_lut_type_to_size(lut_type); 4187 if (lut_size > params->lut_size) 4188 return -EINVAL; 4189 else if (set && lut_size != params->lut_size) 4190 return -EINVAL; 4191 4192 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4193 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4194 if (set) 4195 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4196 4197 desc_params = &desc.params.get_set_rss_lut; 4198 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4199 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4200 4201 if (lut_type == ICE_LUT_GLOBAL) 4202 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4203 params->global_lut_id); 4204 4205 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4206 desc_params->flags = cpu_to_le16(flags); 4207 4208 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4209 } 4210 4211 /** 4212 * ice_aq_get_rss_lut 4213 * @hw: pointer to the hardware structure 4214 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4215 * 4216 * get the RSS lookup table, PF or VSI type 4217 */ 4218 int 4219 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4220 { 4221 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4222 } 
4223 4224 /** 4225 * ice_aq_set_rss_lut 4226 * @hw: pointer to the hardware structure 4227 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4228 * 4229 * set the RSS lookup table, PF or VSI type 4230 */ 4231 int 4232 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4233 { 4234 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4235 } 4236 4237 /** 4238 * __ice_aq_get_set_rss_key 4239 * @hw: pointer to the HW struct 4240 * @vsi_id: VSI FW index 4241 * @key: pointer to key info struct 4242 * @set: set true to set the key, false to get the key 4243 * 4244 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4245 */ 4246 static int 4247 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4248 struct ice_aqc_get_set_rss_keys *key, bool set) 4249 { 4250 struct ice_aqc_get_set_rss_key *desc_params; 4251 u16 key_size = sizeof(*key); 4252 struct ice_aq_desc desc; 4253 4254 if (set) { 4255 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4256 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4257 } else { 4258 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4259 } 4260 4261 desc_params = &desc.params.get_set_rss_key; 4262 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4263 4264 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4265 } 4266 4267 /** 4268 * ice_aq_get_rss_key 4269 * @hw: pointer to the HW struct 4270 * @vsi_handle: software VSI handle 4271 * @key: pointer to key info struct 4272 * 4273 * get the RSS key per VSI 4274 */ 4275 int 4276 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4277 struct ice_aqc_get_set_rss_keys *key) 4278 { 4279 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4280 return -EINVAL; 4281 4282 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4283 key, false); 4284 } 4285 4286 /** 4287 * ice_aq_set_rss_key 4288 * @hw: pointer to the HW struct 4289 * @vsi_handle: software VSI handle 4290 * @keys: 
pointer to key info struct 4291 * 4292 * set the RSS key per VSI 4293 */ 4294 int 4295 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4296 struct ice_aqc_get_set_rss_keys *keys) 4297 { 4298 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4299 return -EINVAL; 4300 4301 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4302 keys, true); 4303 } 4304 4305 /** 4306 * ice_aq_add_lan_txq 4307 * @hw: pointer to the hardware structure 4308 * @num_qgrps: Number of added queue groups 4309 * @qg_list: list of queue groups to be added 4310 * @buf_size: size of buffer for indirect command 4311 * @cd: pointer to command details structure or NULL 4312 * 4313 * Add Tx LAN queue (0x0C30) 4314 * 4315 * NOTE: 4316 * Prior to calling add Tx LAN queue: 4317 * Initialize the following as part of the Tx queue context: 4318 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4319 * Cache profile and Packet shaper profile. 4320 * 4321 * After add Tx LAN queue AQ command is completed: 4322 * Interrupts should be associated with specific queues, 4323 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4324 * flow. 
4325 */ 4326 static int 4327 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4328 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4329 struct ice_sq_cd *cd) 4330 { 4331 struct ice_aqc_add_tx_qgrp *list; 4332 struct ice_aqc_add_txqs *cmd; 4333 struct ice_aq_desc desc; 4334 u16 i, sum_size = 0; 4335 4336 cmd = &desc.params.add_txqs; 4337 4338 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4339 4340 if (!qg_list) 4341 return -EINVAL; 4342 4343 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4344 return -EINVAL; 4345 4346 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4347 sum_size += struct_size(list, txqs, list->num_txqs); 4348 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4349 list->num_txqs); 4350 } 4351 4352 if (buf_size != sum_size) 4353 return -EINVAL; 4354 4355 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4356 4357 cmd->num_qgrps = num_qgrps; 4358 4359 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4360 } 4361 4362 /** 4363 * ice_aq_dis_lan_txq 4364 * @hw: pointer to the hardware structure 4365 * @num_qgrps: number of groups in the list 4366 * @qg_list: the list of groups to disable 4367 * @buf_size: the total size of the qg_list buffer in bytes 4368 * @rst_src: if called due to reset, specifies the reset source 4369 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4370 * @cd: pointer to command details structure or NULL 4371 * 4372 * Disable LAN Tx queue (0x0C31) 4373 */ 4374 static int 4375 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4376 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4377 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4378 struct ice_sq_cd *cd) 4379 { 4380 struct ice_aqc_dis_txq_item *item; 4381 struct ice_aqc_dis_txqs *cmd; 4382 struct ice_aq_desc desc; 4383 u16 vmvf_and_timeout; 4384 u16 i, sz = 0; 4385 int status; 4386 4387 cmd = &desc.params.dis_txqs; 4388 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4389 4390 /* qg_list can be NULL only in VM/VF reset flow */ 
4391 if (!qg_list && !rst_src) 4392 return -EINVAL; 4393 4394 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4395 return -EINVAL; 4396 4397 cmd->num_entries = num_qgrps; 4398 4399 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); 4400 4401 switch (rst_src) { 4402 case ICE_VM_RESET: 4403 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4404 vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; 4405 break; 4406 case ICE_VF_RESET: 4407 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4408 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4409 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & 4410 ICE_AQC_Q_DIS_VMVF_NUM_M; 4411 break; 4412 case ICE_NO_RESET: 4413 default: 4414 break; 4415 } 4416 4417 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); 4418 4419 /* flush pipe on time out */ 4420 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4421 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4422 if (!qg_list) 4423 goto do_aq; 4424 4425 /* set RD bit to indicate that command buffer is provided by the driver 4426 * and it needs to be read by the firmware 4427 */ 4428 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4429 4430 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4431 u16 item_size = struct_size(item, q_id, item->num_qs); 4432 4433 /* If the num of queues is even, add 2 bytes of padding */ 4434 if ((item->num_qs % 2) == 0) 4435 item_size += 2; 4436 4437 sz += item_size; 4438 4439 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4440 } 4441 4442 if (buf_size != sz) 4443 return -EINVAL; 4444 4445 do_aq: 4446 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4447 if (status) { 4448 if (!qg_list) 4449 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4450 vmvf_num, hw->adminq.sq_last_status); 4451 else 4452 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4453 le16_to_cpu(qg_list[0].q_id[0]), 4454 hw->adminq.sq_last_status); 4455 } 4456 return status; 4457 } 4458 4459 /** 4460 * 
ice_aq_cfg_lan_txq 4461 * @hw: pointer to the hardware structure 4462 * @buf: buffer for command 4463 * @buf_size: size of buffer in bytes 4464 * @num_qs: number of queues being configured 4465 * @oldport: origination lport 4466 * @newport: destination lport 4467 * @cd: pointer to command details structure or NULL 4468 * 4469 * Move/Configure LAN Tx queue (0x0C32) 4470 * 4471 * There is a better AQ command to use for moving nodes, so only coding 4472 * this one for configuring the node. 4473 */ 4474 int 4475 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4476 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4477 struct ice_sq_cd *cd) 4478 { 4479 struct ice_aqc_cfg_txqs *cmd; 4480 struct ice_aq_desc desc; 4481 int status; 4482 4483 cmd = &desc.params.cfg_txqs; 4484 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4485 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4486 4487 if (!buf) 4488 return -EINVAL; 4489 4490 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4491 cmd->num_qs = num_qs; 4492 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4493 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); 4494 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); 4495 cmd->blocked_cgds = 0; 4496 4497 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4498 if (status) 4499 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4500 hw->adminq.sq_last_status); 4501 return status; 4502 } 4503 4504 /** 4505 * ice_aq_add_rdma_qsets 4506 * @hw: pointer to the hardware structure 4507 * @num_qset_grps: Number of RDMA Qset groups 4508 * @qset_list: list of Qset groups to be added 4509 * @buf_size: size of buffer for indirect command 4510 * @cd: pointer to command details structure or NULL 4511 * 4512 * Add Tx RDMA Qsets (0x0C33) 4513 */ 4514 static int 4515 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4516 struct ice_aqc_add_rdma_qset_data *qset_list, 4517 u16 buf_size, struct ice_sq_cd *cd) 4518 { 4519 
struct ice_aqc_add_rdma_qset_data *list; 4520 struct ice_aqc_add_rdma_qset *cmd; 4521 struct ice_aq_desc desc; 4522 u16 i, sum_size = 0; 4523 4524 cmd = &desc.params.add_rdma_qset; 4525 4526 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4527 4528 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4529 return -EINVAL; 4530 4531 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4532 u16 num_qsets = le16_to_cpu(list->num_qsets); 4533 4534 sum_size += struct_size(list, rdma_qsets, num_qsets); 4535 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4536 num_qsets); 4537 } 4538 4539 if (buf_size != sum_size) 4540 return -EINVAL; 4541 4542 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4543 4544 cmd->num_qset_grps = num_qset_grps; 4545 4546 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4547 } 4548 4549 /* End of FW Admin Queue command wrappers */ 4550 4551 /** 4552 * ice_pack_ctx_byte - write a byte to a packed context structure 4553 * @src_ctx: unpacked source context structure 4554 * @dest_ctx: packed destination context data 4555 * @ce_info: context element description 4556 */ 4557 static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, 4558 const struct ice_ctx_ele *ce_info) 4559 { 4560 u8 src_byte, dest_byte, mask; 4561 u8 *from, *dest; 4562 u16 shift_width; 4563 4564 /* copy from the next struct field */ 4565 from = src_ctx + ce_info->offset; 4566 4567 /* prepare the bits and mask */ 4568 shift_width = ce_info->lsb % 8; 4569 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4570 4571 src_byte = *from; 4572 src_byte <<= shift_width; 4573 src_byte &= mask; 4574 4575 /* get the current bits from the target bit string */ 4576 dest = dest_ctx + (ce_info->lsb / 8); 4577 4578 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4579 4580 dest_byte &= ~mask; /* get the bits not changing */ 4581 dest_byte |= src_byte; /* add in the new bits */ 4582 4583 /* put it all back */ 4584 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4585 
} 4586 4587 /** 4588 * ice_pack_ctx_word - write a word to a packed context structure 4589 * @src_ctx: unpacked source context structure 4590 * @dest_ctx: packed destination context data 4591 * @ce_info: context element description 4592 */ 4593 static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, 4594 const struct ice_ctx_ele *ce_info) 4595 { 4596 u16 src_word, mask; 4597 __le16 dest_word; 4598 u8 *from, *dest; 4599 u16 shift_width; 4600 4601 /* copy from the next struct field */ 4602 from = src_ctx + ce_info->offset; 4603 4604 /* prepare the bits and mask */ 4605 shift_width = ce_info->lsb % 8; 4606 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4607 4608 /* don't swizzle the bits until after the mask because the mask bits 4609 * will be in a different bit position on big endian machines 4610 */ 4611 src_word = *(u16 *)from; 4612 src_word <<= shift_width; 4613 src_word &= mask; 4614 4615 /* get the current bits from the target bit string */ 4616 dest = dest_ctx + (ce_info->lsb / 8); 4617 4618 memcpy(&dest_word, dest, sizeof(dest_word)); 4619 4620 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4621 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4622 4623 /* put it all back */ 4624 memcpy(dest, &dest_word, sizeof(dest_word)); 4625 } 4626 4627 /** 4628 * ice_pack_ctx_dword - write a dword to a packed context structure 4629 * @src_ctx: unpacked source context structure 4630 * @dest_ctx: packed destination context data 4631 * @ce_info: context element description 4632 */ 4633 static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, 4634 const struct ice_ctx_ele *ce_info) 4635 { 4636 u32 src_dword, mask; 4637 __le32 dest_dword; 4638 u8 *from, *dest; 4639 u16 shift_width; 4640 4641 /* copy from the next struct field */ 4642 from = src_ctx + ce_info->offset; 4643 4644 /* prepare the bits and mask */ 4645 shift_width = ce_info->lsb % 8; 4646 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4647 4648 /* 
don't swizzle the bits until after the mask because the mask bits 4649 * will be in a different bit position on big endian machines 4650 */ 4651 src_dword = *(u32 *)from; 4652 src_dword <<= shift_width; 4653 src_dword &= mask; 4654 4655 /* get the current bits from the target bit string */ 4656 dest = dest_ctx + (ce_info->lsb / 8); 4657 4658 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4659 4660 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4661 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4662 4663 /* put it all back */ 4664 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4665 } 4666 4667 /** 4668 * ice_pack_ctx_qword - write a qword to a packed context structure 4669 * @src_ctx: unpacked source context structure 4670 * @dest_ctx: packed destination context data 4671 * @ce_info: context element description 4672 */ 4673 static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, 4674 const struct ice_ctx_ele *ce_info) 4675 { 4676 u64 src_qword, mask; 4677 __le64 dest_qword; 4678 u8 *from, *dest; 4679 u16 shift_width; 4680 4681 /* copy from the next struct field */ 4682 from = src_ctx + ce_info->offset; 4683 4684 /* prepare the bits and mask */ 4685 shift_width = ce_info->lsb % 8; 4686 mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); 4687 4688 /* don't swizzle the bits until after the mask because the mask bits 4689 * will be in a different bit position on big endian machines 4690 */ 4691 src_qword = *(u64 *)from; 4692 src_qword <<= shift_width; 4693 src_qword &= mask; 4694 4695 /* get the current bits from the target bit string */ 4696 dest = dest_ctx + (ce_info->lsb / 8); 4697 4698 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4699 4700 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4701 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4702 4703 /* put it all back */ 4704 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4705 } 4706 4707 /** 4708 * ice_set_ctx - set context 
bits in packed structure 4709 * @hw: pointer to the hardware structure 4710 * @src_ctx: pointer to a generic non-packed context structure 4711 * @dest_ctx: pointer to memory for the packed structure 4712 * @ce_info: List of Rx context elements 4713 */ 4714 int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4715 const struct ice_ctx_ele *ce_info) 4716 { 4717 int f; 4718 4719 for (f = 0; ce_info[f].width; f++) { 4720 /* We have to deal with each element of the FW response 4721 * using the correct size so that we are correct regardless 4722 * of the endianness of the machine. 4723 */ 4724 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4725 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", 4726 f, ce_info[f].width, ce_info[f].size_of); 4727 continue; 4728 } 4729 switch (ce_info[f].size_of) { 4730 case sizeof(u8): 4731 ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); 4732 break; 4733 case sizeof(u16): 4734 ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); 4735 break; 4736 case sizeof(u32): 4737 ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); 4738 break; 4739 case sizeof(u64): 4740 ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); 4741 break; 4742 default: 4743 return -EINVAL; 4744 } 4745 } 4746 4747 return 0; 4748 } 4749 4750 /** 4751 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4752 * @hw: pointer to the HW struct 4753 * @vsi_handle: software VSI handle 4754 * @tc: TC number 4755 * @q_handle: software queue handle 4756 */ 4757 struct ice_q_ctx * 4758 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4759 { 4760 struct ice_vsi_ctx *vsi; 4761 struct ice_q_ctx *q_ctx; 4762 4763 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4764 if (!vsi) 4765 return NULL; 4766 if (q_handle >= vsi->num_lan_q_entries[tc]) 4767 return NULL; 4768 if (!vsi->lan_q_ctx[tc]) 4769 return NULL; 4770 q_ctx = vsi->lan_q_ctx[tc]; 4771 return &q_ctx[q_handle]; 4772 
} 4773 4774 /** 4775 * ice_ena_vsi_txq 4776 * @pi: port information structure 4777 * @vsi_handle: software VSI handle 4778 * @tc: TC number 4779 * @q_handle: software queue handle 4780 * @num_qgrps: Number of added queue groups 4781 * @buf: list of queue groups to be added 4782 * @buf_size: size of buffer for indirect command 4783 * @cd: pointer to command details structure or NULL 4784 * 4785 * This function adds one LAN queue 4786 */ 4787 int 4788 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4789 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4790 struct ice_sq_cd *cd) 4791 { 4792 struct ice_aqc_txsched_elem_data node = { 0 }; 4793 struct ice_sched_node *parent; 4794 struct ice_q_ctx *q_ctx; 4795 struct ice_hw *hw; 4796 int status; 4797 4798 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4799 return -EIO; 4800 4801 if (num_qgrps > 1 || buf->num_txqs > 1) 4802 return -ENOSPC; 4803 4804 hw = pi->hw; 4805 4806 if (!ice_is_vsi_valid(hw, vsi_handle)) 4807 return -EINVAL; 4808 4809 mutex_lock(&pi->sched_lock); 4810 4811 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4812 if (!q_ctx) { 4813 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4814 q_handle); 4815 status = -EINVAL; 4816 goto ena_txq_exit; 4817 } 4818 4819 /* find a parent node */ 4820 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4821 ICE_SCHED_NODE_OWNER_LAN); 4822 if (!parent) { 4823 status = -EINVAL; 4824 goto ena_txq_exit; 4825 } 4826 4827 buf->parent_teid = parent->info.node_teid; 4828 node.parent_teid = parent->info.node_teid; 4829 /* Mark that the values in the "generic" section as valid. The default 4830 * value in the "generic" section is zero. This means that : 4831 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4832 * - 0 priority among siblings, indicated by Bit 1-3. 4833 * - WFQ, indicated by Bit 4. 
4834 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4835 * Bit 5-6. 4836 * - Bit 7 is reserved. 4837 * Without setting the generic section as valid in valid_sections, the 4838 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 4839 */ 4840 buf->txqs[0].info.valid_sections = 4841 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4842 ICE_AQC_ELEM_VALID_EIR; 4843 buf->txqs[0].info.generic = 0; 4844 buf->txqs[0].info.cir_bw.bw_profile_idx = 4845 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4846 buf->txqs[0].info.cir_bw.bw_alloc = 4847 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4848 buf->txqs[0].info.eir_bw.bw_profile_idx = 4849 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4850 buf->txqs[0].info.eir_bw.bw_alloc = 4851 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4852 4853 /* add the LAN queue */ 4854 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4855 if (status) { 4856 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4857 le16_to_cpu(buf->txqs[0].txq_id), 4858 hw->adminq.sq_last_status); 4859 goto ena_txq_exit; 4860 } 4861 4862 node.node_teid = buf->txqs[0].q_teid; 4863 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4864 q_ctx->q_handle = q_handle; 4865 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4866 4867 /* add a leaf node into scheduler tree queue layer */ 4868 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4869 if (!status) 4870 status = ice_sched_replay_q_bw(pi, q_ctx); 4871 4872 ena_txq_exit: 4873 mutex_unlock(&pi->sched_lock); 4874 return status; 4875 } 4876 4877 /** 4878 * ice_dis_vsi_txq 4879 * @pi: port information structure 4880 * @vsi_handle: software VSI handle 4881 * @tc: TC number 4882 * @num_queues: number of queues 4883 * @q_handles: pointer to software queue handle array 4884 * @q_ids: pointer to the q_id array 4885 * @q_teids: pointer to queue node teids 4886 * @rst_src: if called due to reset, specifies the reset source 4887 * @vmvf_num: the relative VM or VF number that is 
undergoing the reset 4888 * @cd: pointer to command details structure or NULL 4889 * 4890 * This function removes queues and their corresponding nodes in SW DB 4891 */ 4892 int 4893 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4894 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4895 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4896 struct ice_sq_cd *cd) 4897 { 4898 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4899 u16 i, buf_size = __struct_size(qg_list); 4900 struct ice_q_ctx *q_ctx; 4901 int status = -ENOENT; 4902 struct ice_hw *hw; 4903 4904 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4905 return -EIO; 4906 4907 hw = pi->hw; 4908 4909 if (!num_queues) { 4910 /* if queue is disabled already yet the disable queue command 4911 * has to be sent to complete the VF reset, then call 4912 * ice_aq_dis_lan_txq without any queue information 4913 */ 4914 if (rst_src) 4915 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4916 vmvf_num, NULL); 4917 return -EIO; 4918 } 4919 4920 mutex_lock(&pi->sched_lock); 4921 4922 for (i = 0; i < num_queues; i++) { 4923 struct ice_sched_node *node; 4924 4925 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4926 if (!node) 4927 continue; 4928 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4929 if (!q_ctx) { 4930 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4931 q_handles[i]); 4932 continue; 4933 } 4934 if (q_ctx->q_handle != q_handles[i]) { 4935 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4936 q_ctx->q_handle, q_handles[i]); 4937 continue; 4938 } 4939 qg_list->parent_teid = node->info.parent_teid; 4940 qg_list->num_qs = 1; 4941 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4942 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4943 vmvf_num, cd); 4944 4945 if (status) 4946 break; 4947 ice_free_sched_node(pi, node); 4948 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4949 q_ctx->q_teid = ICE_INVAL_TEID; 4950 } 4951 
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 *
 * Return: 0 on success, -EIO if the port is not ready, -EINVAL for an
 * invalid VSI handle, or the error from ice_sched_cfg_vsi() on the first
 * TC that fails.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 *
 * Return: see ice_cfg_vsi_qs().
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 *
 * NOTE(review): @tc_bitmap is u16 here while ice_cfg_vsi_qs() takes u8;
 * the value is implicitly truncated on the call below — confirm callers
 * never set bits above bit 7.
 *
 * Return: see ice_cfg_vsi_qs().
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qset
 *
 * Return: 0 on success; -EIO if the port is not ready; -EINVAL for an
 * invalid VSI handle or when no free qset parent node exists; -ENOMEM on
 * allocation failure; otherwise the AQ/scheduler error code.
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	/* all qsets in this request share the same parent node */
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		/* use default rate-limiter profiles and weights for CIR/EIR */
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	/* mirror each FW-created qset as a leaf in the SW scheduler tree */
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 *
 * Return: 0 on success, -EIO if the port is not ready, or the AQ error
 * from the first qset that fails to disable.
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		/* TEIDs not present in the SW tree are silently skipped */
		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		/* every output pointer is optional; copy only what was asked */
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		/* every output pointer is optional; copy only what was asked */
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}

/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @dpll_state: current DPLL state
 * @config: current DPLL config
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC_mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		/* reassemble the 48-bit signed phase offset from the two
		 * 32-bit halves, then sign-extend from bit 47
		 */
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = sign_extend64(*phase_offset, 47);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		/* FW reports back the frequency actually applied */
		*freq = le32_to_cpu(cmd->freq);

	return status;
}

/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: PHY output frequency
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 *
 * Get sensor reading (0x0632)
 *
 * Return: 0 on success or negative AQ error.
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 *
 * Return: always 0.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 *
 * Return: 0 on success, -EINVAL for an invalid VSI handle, or the first
 * error from the replay helpers.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	/* mask the read down to the 40 valid counter bits */
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 *
 * Return: 0 on success or the AQ error code on failure.
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	/* exactly one element must come back for a single-TEID query */
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 *
 * Return: 0 on success, -EINVAL if @data is NULL, or the AQ error code.
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	/* number of bytes to read is encoded in the low nibble of params */
	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		/* read bytes come back inline in the descriptor response */
		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * * Return:
 * * 0 - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provide IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 *
 * Return: 0 on success or negative AQ error.
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 *
 * Return: 0 on success or negative AQ error.
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
5902 * ice_is_fw_api_min_ver 5903 * @hw: pointer to the hardware structure 5904 * @maj: major version 5905 * @min: minor version 5906 * @patch: patch version 5907 * 5908 * Checks if the firmware API is minimum version 5909 */ 5910 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) 5911 { 5912 if (hw->api_maj_ver == maj) { 5913 if (hw->api_min_ver > min) 5914 return true; 5915 if (hw->api_min_ver == min && hw->api_patch >= patch) 5916 return true; 5917 } else if (hw->api_maj_ver > maj) { 5918 return true; 5919 } 5920 5921 return false; 5922 } 5923 5924 /** 5925 * ice_fw_supports_link_override 5926 * @hw: pointer to the hardware structure 5927 * 5928 * Checks if the firmware supports link override 5929 */ 5930 bool ice_fw_supports_link_override(struct ice_hw *hw) 5931 { 5932 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, 5933 ICE_FW_API_LINK_OVERRIDE_MIN, 5934 ICE_FW_API_LINK_OVERRIDE_PATCH); 5935 } 5936 5937 /** 5938 * ice_get_link_default_override 5939 * @ldo: pointer to the link default override struct 5940 * @pi: pointer to the port info struct 5941 * 5942 * Gets the link default override for a port 5943 */ 5944 int 5945 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 5946 struct ice_port_info *pi) 5947 { 5948 u16 i, tlv, tlv_len, tlv_start, buf, offset; 5949 struct ice_hw *hw = pi->hw; 5950 int status; 5951 5952 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 5953 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 5954 if (status) { 5955 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); 5956 return status; 5957 } 5958 5959 /* Each port has its own config; calculate for our port */ 5960 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 5961 ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 5962 5963 /* link options first */ 5964 status = ice_read_sr_word(hw, tlv_start, &buf); 5965 if (status) { 5966 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5967 return status; 
5968 } 5969 ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf); 5970 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 5971 ICE_LINK_OVERRIDE_PHY_CFG_S; 5972 5973 /* link PHY config */ 5974 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 5975 status = ice_read_sr_word(hw, offset, &buf); 5976 if (status) { 5977 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); 5978 return status; 5979 } 5980 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 5981 5982 /* PHY types low */ 5983 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; 5984 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 5985 status = ice_read_sr_word(hw, (offset + i), &buf); 5986 if (status) { 5987 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5988 return status; 5989 } 5990 /* shift 16 bits at a time to fill 64 bits */ 5991 ldo->phy_type_low |= ((u64)buf << (i * 16)); 5992 } 5993 5994 /* PHY types high */ 5995 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + 5996 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; 5997 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 5998 status = ice_read_sr_word(hw, (offset + i), &buf); 5999 if (status) { 6000 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 6001 return status; 6002 } 6003 /* shift 16 bits at a time to fill 64 bits */ 6004 ldo->phy_type_high |= ((u64)buf << (i * 16)); 6005 } 6006 6007 return status; 6008 } 6009 6010 /** 6011 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled 6012 * @caps: get PHY capability data 6013 */ 6014 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) 6015 { 6016 if (caps->caps & ICE_AQC_PHY_AN_MODE || 6017 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | 6018 ICE_AQC_PHY_AN_EN_CLAUSE73 | 6019 ICE_AQC_PHY_AN_EN_CLAUSE37)) 6020 return true; 6021 6022 return false; 6023 } 6024 6025 /** 6026 * ice_aq_set_lldp_mib - Set the LLDP MIB 6027 * @hw: pointer to the HW struct 
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 *
 * Return: 0 on success, -EINVAL for a NULL/empty buffer, or the AQ error.
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	/* this command sends a buffer to FW, so the RD flag must be set */
	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 *
 * Return: true only on E810 MACs with a new enough FW API version.
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 *
 * Return: 0 on success or negative AQ error.
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 *
 * Return: 0 on success or negative AQ error.
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 *
 * Return: true when the FW API version is new enough.
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* each of the indexes into the following array match the speed of a return
 * value from the list of AQ returned speeds like the range:
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this
 * array. The link_speed returned by the firmware is a 16 bit value and is
 * indexed by [fls(speed) - 1]; entries beyond the last defined speed
 * (SPEED_200000 at index 11) fall outside the array and are reported as 0
 * by ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	/* out-of-range indices (unknown/unsupported speeds) map to 0 */
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}