// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
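/* Example of the dump above (hypothetical value): a phy_type_low of 0x1000
 * has only bit 12 set, so with prefix "phy_caps_media" the helper would log:
 *
 *	phy_caps_media: phy_type_low: 0x0000000000001000
 *	phy_caps_media: bit(12): 10GBASE_T
 */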
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830CC_BACKPLANE:
	case ICE_DEV_ID_E830CC_QSFP56:
	case ICE_DEV_ID_E830CC_SFP:
	case ICE_DEV_ID_E830CC_SFP_DD:
	case ICE_DEV_ID_E830C_BACKPLANE:
	case ICE_DEV_ID_E830_XXV_BACKPLANE:
	case ICE_DEV_ID_E830C_QSFP:
	case ICE_DEV_ID_E830_XXV_QSFP:
	case ICE_DEV_ID_E830C_SFP:
	case ICE_DEV_ID_E830_XXV_SFP:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}
/**
 * ice_is_e822 - Check if a device is E822 family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E822 based, false if not.
 */
bool ice_is_e822(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e823 - Check if a device is E823 family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
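/* ice_clear_pf_cfg() above is the minimal "direct" (bufferless) AdminQ
 * command pattern used throughout this file: fill a default descriptor with
 * an opcode via ice_fill_dflt_direct_cmd_desc() and send it with a NULL
 * buffer and zero length. Indirect commands differ only in passing a buffer
 * and its size to ice_aq_send_cmd().
 */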
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The LAN MAC address is also saved in the HW
 * struct (port_info->mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
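/* Usage sketch (mirrors the call in ice_init_hw() below): size the buffer for
 * the two addresses a port may report.
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
 *			    GFP_KERNEL);
 *
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 */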
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node - get a node handle from the netlist
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}
/**
 * ice_find_netlist_node - find a node handle in the netlist
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
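/* Usage sketch (NODE_TYPE and PART_NUM are hypothetical placeholders for real
 * ICE_AQC_LINK_TOPO_NODE_TYPE_* and part-number constants): probe for an
 * optional netlist component and read its handle only on success.
 *
 *	u16 handle;
 *
 *	if (!ice_find_netlist_node(hw, NODE_TYPE, PART_NUM, &handle))
 *		... component present, handle is valid ...
 *	else
 *		... treat -ENOENT as "not fitted", anything else as AQ failure
 */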
/**
 * ice_is_media_cage_present - check if media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
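/* Examples of the mapping above, taken directly from the switch cases:
 * ICE_PHY_TYPE_LOW_10GBASE_T maps to ICE_MEDIA_BASET,
 * ICE_PHY_TYPE_LOW_25GBASE_CR maps to ICE_MEDIA_DA, and
 * ICE_PHY_TYPE_LOW_40G_XLAUI maps to ICE_MEDIA_DA only when a media cage is
 * detected, otherwise it falls through to ICE_MEDIA_BACKPLANE.
 */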
/**
 * ice_get_link_status_datalen - get link status data length
 * @hw: pointer to the HW struct
 *
 * Return: the size of the Get Link Status AQ response data, which is larger
 * for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
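/* Usage sketch: callers in this driver check the pi->phy.get_link_info flag
 * (cleared at the end of the function above) before refreshing:
 *
 *	if (pi->phy.get_link_info) {
 *		int status = ice_aq_get_link_info(pi, true, NULL, NULL);
 *
 *		if (status)
 *			return status;
 *	}
 *
 * On success the refreshed state is available in pi->phy.link_info.
 */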
/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}
/**
 * ice_aq_set_mac_cfg - set MAC configuration
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularities
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	/* check against the same bound the loop uses; comparing cnt against
	 * ICE_PF_RESET_WAIT_COUNT alone would let a real timeout go undetected
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
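/* Usage sketch: ice_init_hw() above issues ice_reset(hw, ICE_RESET_PFR) early
 * in initialization, which lands in ice_pf_reset(). CORER/GLOBR requests
 * instead set a trigger bit in GLGEN_RTRIG (see ice_reset() below) and then
 * poll for firmware readiness via ice_check_reset().
 */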
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw - copy rxq context to HW registers
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx - write an Rx queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
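/* Usage sketch (field values are hypothetical; real callers derive them from
 * the ring configuration): zero the sparse context, fill the fields the queue
 * needs, and let ice_set_ctx() pack them per ice_rlan_ctx_info above.
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;		base is in 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.rxmax = max_frame_size;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */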
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 * @flags: control queue descriptor flags
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(flags);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
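/* Usage sketch for ice_sbq_rw_reg() above (dest, reg_lo and reg_hi are
 * hypothetical values): an opcode of zero selects a read, so the result
 * arrives in the completion and is copied back into in->data.
 *
 *	struct ice_sbq_msg_input in = {
 *		.dest_dev = dest,
 *		.opcode = 0,
 *		.msg_addr_low = reg_lo,
 *		.msg_addr_high = reg_hi,
 *	};
 *
 *	if (!ice_sbq_rw_reg(hw, &in, ICE_AQ_FLAG_RD))
 *		val = in.data;
 */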
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd - check if the ATQ send should be retried
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
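/* Note on the retry policy above: only the direct (bufferless) opcodes listed
 * in ice_should_retry_sq_send_cmd() are retried, and only when firmware
 * answered ICE_AQ_RC_EBUSY; each retry resends a saved copy of the descriptor
 * after ICE_SQ_SEND_DELAY_TIME_MS, up to ICE_SQ_SEND_MAX_EXECUTE attempts.
 */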
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Get/Set Tx Topology, Add Recipe, Set Recipes to Profile
	 * Association, Get Recipe, Get Recipes to Profile Association, and
	 * Release Resource (with resource ID set to Global Config Lock)
	 * AdminQ commands are allowed; all others must block until the
	 * package download completes and the Global Config Lock is released.
	 * See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_set_tx_topo:
	case ice_aqc_opc_get_tx_topo:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver - get the firmware version
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver - send the driver version to firmware
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown - tell firmware we are shutting down the AdminQ
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res - request a common resource
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0 -         acquired lock, and can perform download package
 * 2) -EIO -      did not get lock, driver should fail to load
 * 3) -EALREADY - did not get lock, but another driver has
 *                successfully downloaded the package; the driver does
 *                not have to download the package and can continue
 *                loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
1826 */ 1827 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1828 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1829 *timeout = le32_to_cpu(cmd_resp->timeout); 1830 return 0; 1831 } else if (le16_to_cpu(cmd_resp->status) == 1832 ICE_AQ_RES_GLBL_IN_PROG) { 1833 *timeout = le32_to_cpu(cmd_resp->timeout); 1834 return -EIO; 1835 } else if (le16_to_cpu(cmd_resp->status) == 1836 ICE_AQ_RES_GLBL_DONE) { 1837 return -EALREADY; 1838 } 1839 1840 /* invalid FW response, force a timeout immediately */ 1841 *timeout = 0; 1842 return -EIO; 1843 } 1844 1845 /* If the resource is held by some other driver, the command completes 1846 * with a busy return value and the timeout field indicates the maximum 1847 * time the current owner of the resource has to free it. 1848 */ 1849 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1850 *timeout = le32_to_cpu(cmd_resp->timeout); 1851 1852 return status; 1853 } 1854 1855 /** 1856 * ice_aq_release_res 1857 * @hw: pointer to the HW struct 1858 * @res: resource ID 1859 * @sdp_number: resource number 1860 * @cd: pointer to command details structure or NULL 1861 * 1862 * Release a common resource using the admin queue command (0x0009). 1863 */ 1864 static int 1865 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1866 struct ice_sq_cd *cd) 1867 { 1868 struct ice_aqc_req_res *cmd; 1869 struct ice_aq_desc desc; 1870 1871 cmd = &desc.params.res_owner; 1872 1873 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1874 1875 cmd->res_id = cpu_to_le16(res); 1876 cmd->res_number = cpu_to_le32(sdp_number); 1877 1878 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1879 } 1880 1881 /** 1882 * ice_acquire_res 1883 * @hw: pointer to the HW structure 1884 * @res: resource ID 1885 * @access: access type (read or write) 1886 * @timeout: timeout in milliseconds 1887 * 1888 * This function will attempt to acquire the ownership of a resource. 1889 */ 1890 int 1891 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1892 enum ice_aq_res_access_type access, u32 timeout) 1893 { 1894 #define ICE_RES_POLLING_DELAY_MS 10 1895 u32 delay = ICE_RES_POLLING_DELAY_MS; 1896 u32 time_left = timeout; 1897 int status; 1898 1899 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1900 1901 /* A return code of -EALREADY means that another driver has 1902 * previously acquired the resource and performed any necessary updates; 1903 * in this case the caller does not obtain the resource and has no 1904 * further work to do. 1905 */ 1906 if (status == -EALREADY) 1907 goto ice_acquire_res_exit; 1908 1909 if (status) 1910 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 1911 1912 /* If necessary, poll until the current lock owner times out */ 1913 timeout = time_left; 1914 while (status && timeout && time_left) { 1915 mdelay(delay); 1916 timeout = (timeout > delay) ?
timeout - delay : 0; 1917 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1918 1919 if (status == -EALREADY) 1920 /* lock free, but no work to do */ 1921 break; 1922 1923 if (!status) 1924 /* lock acquired */ 1925 break; 1926 } 1927 if (status && status != -EALREADY) 1928 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1929 1930 ice_acquire_res_exit: 1931 if (status == -EALREADY) { 1932 if (access == ICE_RES_WRITE) 1933 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1934 else 1935 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1936 } 1937 return status; 1938 } 1939 1940 /** 1941 * ice_release_res 1942 * @hw: pointer to the HW structure 1943 * @res: resource ID 1944 * 1945 * This function will release a resource using the proper Admin Command. 1946 */ 1947 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1948 { 1949 unsigned long timeout; 1950 int status; 1951 1952 /* there are some rare cases when trying to release the resource 1953 * results in an admin queue timeout, so handle them correctly 1954 */ 1955 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1956 do { 1957 status = ice_aq_release_res(hw, res, 0, NULL); 1958 if (status != -EIO) 1959 break; 1960 usleep_range(1000, 2000); 1961 } while (time_before(jiffies, timeout)); 1962 } 1963 1964 /** 1965 * ice_aq_alloc_free_res - command to allocate/free resources 1966 * @hw: pointer to the HW struct 1967 * @buf: Indirect buffer to hold data parameters and response 1968 * @buf_size: size of buffer for indirect commands 1969 * @opc: pass in the command opcode 1970 * 1971 * Helper function to allocate/free resources using the admin queue commands 1972 */ 1973 int ice_aq_alloc_free_res(struct ice_hw *hw, 1974 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1975 enum ice_adminq_opc opc) 1976 { 1977 struct ice_aqc_alloc_free_res_cmd *cmd; 1978 struct ice_aq_desc desc; 1979 1980 cmd = &desc.params.sw_res_ctrl; 1981 1982 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1983 return -EINVAL; 1984 1985 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1986 1987 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1988 1989 cmd->num_entries = cpu_to_le16(1); 1990 1991 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1992 } 1993 1994 /** 1995 * ice_alloc_hw_res - allocate resource 1996 * @hw: pointer to the HW struct 1997 * @type: type of resource 1998 * @num: number of resources to allocate 1999 * @btm: allocate from bottom 2000 * @res: pointer to array that will receive the resources 2001 */ 2002 int 2003 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2004 { 2005 struct ice_aqc_alloc_free_res_elem *buf; 2006 u16 buf_len; 2007 int status; 2008 2009 buf_len = struct_size(buf, elem, num); 2010 buf = kzalloc(buf_len, GFP_KERNEL); 2011 if (!buf) 2012 return -ENOMEM; 2013 2014 /* Prepare buffer to allocate resource. 
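 * The buffer is one ice_aqc_alloc_free_res_elem header followed by num
 * element slots. Judging by the flag names (an inference, not a spec
 * quote), DEDICATED requests exclusive rather than shared entries and
 * IGNORE_INDEX lets firmware pick which indices to return in
 * buf->elem[].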
*/ 2015 buf->num_elems = cpu_to_le16(num); 2016 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2017 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2018 if (btm) 2019 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2020 2021 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2022 if (status) 2023 goto ice_alloc_res_exit; 2024 2025 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2026 2027 ice_alloc_res_exit: 2028 kfree(buf); 2029 return status; 2030 } 2031 2032 /** 2033 * ice_free_hw_res - free allocated HW resource 2034 * @hw: pointer to the HW struct 2035 * @type: type of resource to free 2036 * @num: number of resources 2037 * @res: pointer to array that contains the resources to free 2038 */ 2039 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2040 { 2041 struct ice_aqc_alloc_free_res_elem *buf; 2042 u16 buf_len; 2043 int status; 2044 2045 buf_len = struct_size(buf, elem, num); 2046 buf = kzalloc(buf_len, GFP_KERNEL); 2047 if (!buf) 2048 return -ENOMEM; 2049 2050 /* Prepare buffer to free resource. */ 2051 buf->num_elems = cpu_to_le16(num); 2052 buf->res_type = cpu_to_le16(type); 2053 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2054 2055 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2056 if (status) 2057 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2058 2059 kfree(buf); 2060 return status; 2061 } 2062 2063 /** 2064 * ice_get_num_per_func - determine number of resources per PF 2065 * @hw: pointer to the HW structure 2066 * @max: value to be evenly split between each PF 2067 * 2068 * Determine the number of valid functions by going through the bitmap returned 2069 * from parsing capabilities and use this to calculate the number of resources 2070 * per PF based on the max value passed in. 2071 */ 2072 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2073 { 2074 u8 funcs; 2075 2076 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2077 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2078 ICE_CAPS_VALID_FUNCS_M); 2079 2080 if (!funcs) 2081 return 0; 2082 2083 return max / funcs; 2084 } 2085 2086 /** 2087 * ice_parse_common_caps - parse common device/function capabilities 2088 * @hw: pointer to the HW struct 2089 * @caps: pointer to common capabilities structure 2090 * @elem: the capability element to parse 2091 * @prefix: message prefix for tracing capabilities 2092 * 2093 * Given a capability element, extract relevant details into the common 2094 * capability structure. 2095 * 2096 * Returns: true if the capability matches one of the common capability ids, 2097 * false otherwise. 
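 *
 * For illustration (hypothetical element): cap = ICE_AQC_CAPS_RSS with
 * number = 512 and logical_id = 11 would set caps->rss_table_size = 512
 * and caps->rss_table_entry_width = 11, and the function would return
 * true.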
2098 */ 2099 static bool 2100 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2101 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2102 { 2103 u32 logical_id = le32_to_cpu(elem->logical_id); 2104 u32 phys_id = le32_to_cpu(elem->phys_id); 2105 u32 number = le32_to_cpu(elem->number); 2106 u16 cap = le16_to_cpu(elem->cap); 2107 bool found = true; 2108 2109 switch (cap) { 2110 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2111 caps->valid_functions = number; 2112 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2113 caps->valid_functions); 2114 break; 2115 case ICE_AQC_CAPS_SRIOV: 2116 caps->sr_iov_1_1 = (number == 1); 2117 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2118 caps->sr_iov_1_1); 2119 break; 2120 case ICE_AQC_CAPS_DCB: 2121 caps->dcb = (number == 1); 2122 caps->active_tc_bitmap = logical_id; 2123 caps->maxtc = phys_id; 2124 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2125 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2126 caps->active_tc_bitmap); 2127 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2128 break; 2129 case ICE_AQC_CAPS_RSS: 2130 caps->rss_table_size = number; 2131 caps->rss_table_entry_width = logical_id; 2132 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2133 caps->rss_table_size); 2134 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2135 caps->rss_table_entry_width); 2136 break; 2137 case ICE_AQC_CAPS_RXQS: 2138 caps->num_rxq = number; 2139 caps->rxq_first_id = phys_id; 2140 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2141 caps->num_rxq); 2142 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2143 caps->rxq_first_id); 2144 break; 2145 case ICE_AQC_CAPS_TXQS: 2146 caps->num_txq = number; 2147 caps->txq_first_id = phys_id; 2148 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2149 caps->num_txq); 2150 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2151 caps->txq_first_id); 2152 break; 2153 case ICE_AQC_CAPS_MSIX: 2154 caps->num_msix_vectors = number; 2155 caps->msix_vector_first_id = phys_id; 2156 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2157 caps->num_msix_vectors); 2158 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2159 caps->msix_vector_first_id); 2160 break; 2161 case ICE_AQC_CAPS_PENDING_NVM_VER: 2162 caps->nvm_update_pending_nvm = true; 2163 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2164 break; 2165 case ICE_AQC_CAPS_PENDING_OROM_VER: 2166 caps->nvm_update_pending_orom = true; 2167 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2168 break; 2169 case ICE_AQC_CAPS_PENDING_NET_VER: 2170 caps->nvm_update_pending_netlist = true; 2171 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2172 break; 2173 case ICE_AQC_CAPS_NVM_MGMT: 2174 caps->nvm_unified_update = 2175 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2176 true : false; 2177 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2178 caps->nvm_unified_update); 2179 break; 2180 case ICE_AQC_CAPS_RDMA: 2181 caps->rdma = (number == 1); 2182 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2183 break; 2184 case ICE_AQC_CAPS_MAX_MTU: 2185 caps->max_mtu = number; 2186 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2187 prefix, caps->max_mtu); 2188 break; 2189 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2190 caps->pcie_reset_avoidance = (number > 0); 2191 ice_debug(hw, ICE_DBG_INIT, 2192 "%s: pcie_reset_avoidance = %d\n", prefix, 2193 caps->pcie_reset_avoidance); 2194 break; 2195 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2196 caps->reset_restrict_support = (number == 1); 2197 ice_debug(hw, ICE_DBG_INIT, 2198 "%s: reset_restrict_support = %d\n", prefix, 2199 caps->reset_restrict_support); 2200 break; 2201 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2202 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2203 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2204 prefix, caps->roce_lag); 2205 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2206 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2207 prefix, caps->sriov_lag); 2208 break; 2209 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2210 caps->tx_sched_topo_comp_mode_en = (number == 1); 2211 break; 2212 default: 2213 /* Not one of the recognized common capabilities */ 2214 found = false; 2215 } 2216 2217 return found; 2218 } 2219 2220 /** 2221 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2222 * @hw: pointer to the HW structure 2223 * @caps: pointer to capabilities structure to fix 2224 * 2225 * Re-calculate the capabilities that are dependent on the number of physical 2226 * ports; i.e. some features are not supported or function differently on 2227 * devices with more than 4 ports. 2228 */ 2229 static void 2230 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2231 { 2232 /* This assumes device capabilities are always scanned before function 2233 * capabilities during the initialization flow. 2234 */ 2235 if (hw->dev_caps.num_funcs > 4) { 2236 /* Max 4 TCs per port */ 2237 caps->maxtc = 4; 2238 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2239 caps->maxtc); 2240 if (caps->rdma) { 2241 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2242 caps->rdma = 0; 2243 } 2244 2245 /* print message only when processing device capabilities 2246 * during initialization. 2247 */ 2248 if (caps == &hw->dev_caps.common_cap) 2249 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2250 } 2251 } 2252 2253 /** 2254 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2255 * @hw: pointer to the HW struct 2256 * @func_p: pointer to function capabilities structure 2257 * @cap: pointer to the capability element to parse 2258 * 2259 * Extract function capabilities for ICE_AQC_CAPS_VF. 
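 *
 * For illustration (hypothetical element): number = 16 with
 * logical_id = 64 yields num_allocd_vfs = 16 and vf_base_id = 64.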
2260 */ 2261 static void 2262 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2263 struct ice_aqc_list_caps_elem *cap) 2264 { 2265 u32 logical_id = le32_to_cpu(cap->logical_id); 2266 u32 number = le32_to_cpu(cap->number); 2267 2268 func_p->num_allocd_vfs = number; 2269 func_p->vf_base_id = logical_id; 2270 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2271 func_p->num_allocd_vfs); 2272 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2273 func_p->vf_base_id); 2274 } 2275 2276 /** 2277 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2278 * @hw: pointer to the HW struct 2279 * @func_p: pointer to function capabilities structure 2280 * @cap: pointer to the capability element to parse 2281 * 2282 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2283 */ 2284 static void 2285 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2286 struct ice_aqc_list_caps_elem *cap) 2287 { 2288 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2289 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2290 le32_to_cpu(cap->number)); 2291 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2292 func_p->guar_num_vsi); 2293 } 2294 2295 /** 2296 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2297 * @hw: pointer to the HW struct 2298 * @func_p: pointer to function capabilities structure 2299 * @cap: pointer to the capability element to parse 2300 * 2301 * Extract function capabilities for ICE_AQC_CAPS_1588. 2302 */ 2303 static void 2304 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2305 struct ice_aqc_list_caps_elem *cap) 2306 { 2307 struct ice_ts_func_info *info = &func_p->ts_func_info; 2308 u32 number = le32_to_cpu(cap->number); 2309 2310 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2311 func_p->common_cap.ieee_1588 = info->ena; 2312 2313 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2314 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2315 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2316 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2317 2318 if (!ice_is_e825c(hw)) { 2319 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2320 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2321 } else { 2322 info->clk_freq = ICE_TIME_REF_FREQ_156_250; 2323 info->clk_src = ICE_CLK_SRC_TCXO; 2324 } 2325 2326 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2327 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2328 } else { 2329 /* Unknown clock frequency, so assume a (probably incorrect) 2330 * default to avoid out-of-bounds lookups of frequency-related 2331 * information.
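 * For example, a (hypothetical) clk_freq of 9 reported by firmware with
 * NUM_ICE_TIME_REF_FREQ at or below that value would otherwise index
 * past the time-reference tables, so fall back to the 25.000 MHz entry
 * instead.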
2332 */ 2333 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2334 info->clk_freq); 2335 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2336 } 2337 2338 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2339 func_p->common_cap.ieee_1588); 2340 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2341 info->src_tmr_owned); 2342 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2343 info->tmr_ena); 2344 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2345 info->tmr_index_owned); 2346 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2347 info->tmr_index_assoc); 2348 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2349 info->clk_freq); 2350 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2351 info->clk_src); 2352 } 2353 2354 /** 2355 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2356 * @hw: pointer to the HW struct 2357 * @func_p: pointer to function capabilities structure 2358 * 2359 * Extract function capabilities for ICE_AQC_CAPS_FD. 2360 */ 2361 static void 2362 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2363 { 2364 u32 reg_val, gsize, bsize; 2365 2366 reg_val = rd32(hw, GLQF_FD_SIZE); 2367 switch (hw->mac_type) { 2368 case ICE_MAC_E830: 2369 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2370 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2371 break; 2372 case ICE_MAC_E810: 2373 default: 2374 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2375 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2376 } 2377 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2378 func_p->fd_fltr_best_effort = bsize; 2379 2380 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2381 func_p->fd_fltr_guar); 2382 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2383 func_p->fd_fltr_best_effort); 2384 } 2385 2386 /** 2387 * ice_parse_func_caps - Parse function capabilities 2388 * @hw: pointer to the HW struct 2389 * @func_p: pointer to function capabilities structure 2390 * @buf: buffer containing the function capability records 2391 * @cap_count: the number of capabilities 2392 * 2393 * Helper function to parse function (0x000A) capabilities list. For 2394 * capabilities shared between device and function, this relies on 2395 * ice_parse_common_caps. 2396 * 2397 * Loop through the list of provided capabilities and extract the relevant 2398 * data into the function capabilities structure.
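 *
 * For illustration (hypothetical response): a list carrying
 * { ICE_AQC_CAPS_VSI, ICE_AQC_CAPS_1588, 0x9999 } dispatches the first
 * two elements to their dedicated parsers below and logs the made-up
 * 0x9999 element as an unknown capability.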
2399 */ 2400 static void 2401 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2402 void *buf, u32 cap_count) 2403 { 2404 struct ice_aqc_list_caps_elem *cap_resp; 2405 u32 i; 2406 2407 cap_resp = buf; 2408 2409 memset(func_p, 0, sizeof(*func_p)); 2410 2411 for (i = 0; i < cap_count; i++) { 2412 u16 cap = le16_to_cpu(cap_resp[i].cap); 2413 bool found; 2414 2415 found = ice_parse_common_caps(hw, &func_p->common_cap, 2416 &cap_resp[i], "func caps"); 2417 2418 switch (cap) { 2419 case ICE_AQC_CAPS_VF: 2420 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2421 break; 2422 case ICE_AQC_CAPS_VSI: 2423 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2424 break; 2425 case ICE_AQC_CAPS_1588: 2426 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2427 break; 2428 case ICE_AQC_CAPS_FD: 2429 ice_parse_fdir_func_caps(hw, func_p); 2430 break; 2431 default: 2432 /* Don't list common capabilities as unknown */ 2433 if (!found) 2434 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2435 i, cap); 2436 break; 2437 } 2438 } 2439 2440 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2441 } 2442 2443 /** 2444 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2445 * @hw: pointer to the HW struct 2446 * @dev_p: pointer to device capabilities structure 2447 * @cap: capability element to parse 2448 * 2449 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2450 */ 2451 static void 2452 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2453 struct ice_aqc_list_caps_elem *cap) 2454 { 2455 u32 number = le32_to_cpu(cap->number); 2456 2457 dev_p->num_funcs = hweight32(number); 2458 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2459 dev_p->num_funcs); 2460 } 2461 2462 /** 2463 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2464 * @hw: pointer to the HW struct 2465 * @dev_p: pointer to device capabilities structure 2466 * @cap: capability element to parse 2467 * 2468 * Parse ICE_AQC_CAPS_VF for device capabilities. 2469 */ 2470 static void 2471 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2472 struct ice_aqc_list_caps_elem *cap) 2473 { 2474 u32 number = le32_to_cpu(cap->number); 2475 2476 dev_p->num_vfs_exposed = number; 2477 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2478 dev_p->num_vfs_exposed); 2479 } 2480 2481 /** 2482 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2483 * @hw: pointer to the HW struct 2484 * @dev_p: pointer to device capabilities structure 2485 * @cap: capability element to parse 2486 * 2487 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2488 */ 2489 static void 2490 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2491 struct ice_aqc_list_caps_elem *cap) 2492 { 2493 u32 number = le32_to_cpu(cap->number); 2494 2495 dev_p->num_vsi_allocd_to_host = number; 2496 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2497 dev_p->num_vsi_allocd_to_host); 2498 } 2499 2500 /** 2501 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2502 * @hw: pointer to the HW struct 2503 * @dev_p: pointer to device capabilities structure 2504 * @cap: capability element to parse 2505 * 2506 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
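 *
 * For illustration: the number field is a bitfield, so a (hypothetical)
 * value with ICE_TS_TMR0_OWND_M and ICE_TS_TMR0_ENA_M set but
 * ICE_TS_TMR1_OWND_M clear reports that this device owns and has
 * enabled timer 0 while timer 1 is owned elsewhere.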
2507 */ 2508 static void 2509 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2510 struct ice_aqc_list_caps_elem *cap) 2511 { 2512 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2513 u32 logical_id = le32_to_cpu(cap->logical_id); 2514 u32 phys_id = le32_to_cpu(cap->phys_id); 2515 u32 number = le32_to_cpu(cap->number); 2516 2517 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2518 dev_p->common_cap.ieee_1588 = info->ena; 2519 2520 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2521 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2522 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2523 2524 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2525 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2526 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2527 2528 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2529 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2530 2531 info->ena_ports = logical_id; 2532 info->tmr_own_map = phys_id; 2533 2534 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2535 dev_p->common_cap.ieee_1588); 2536 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2537 info->tmr0_owner); 2538 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2539 info->tmr0_owned); 2540 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2541 info->tmr0_ena); 2542 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2543 info->tmr1_owner); 2544 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2545 info->tmr1_owned); 2546 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2547 info->tmr1_ena); 2548 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2549 info->ts_ll_read); 2550 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2551 info->ts_ll_int_read); 2552 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2553 info->ena_ports); 2554 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2555 info->tmr_own_map); 2556 } 2557 2558 /** 2559 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2560 * @hw: pointer to the HW struct 2561 * @dev_p: pointer to device capabilities structure 2562 * @cap: capability element to parse 2563 * 2564 * Parse ICE_AQC_CAPS_FD for device capabilities. 2565 */ 2566 static void 2567 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2568 struct ice_aqc_list_caps_elem *cap) 2569 { 2570 u32 number = le32_to_cpu(cap->number); 2571 2572 dev_p->num_flow_director_fltr = number; 2573 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2574 dev_p->num_flow_director_fltr); 2575 } 2576 2577 /** 2578 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2579 * @hw: pointer to the HW struct 2580 * @dev_p: pointer to device capabilities structure 2581 * @cap: capability element to parse 2582 * 2583 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2584 * enabled sensors. 
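 *
 * The number field is a bitmap; each set bit is presumed to correspond
 * to one readable sensor (e.g. the internal temperature sensor).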
2585 */ 2586 static void 2587 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2588 struct ice_aqc_list_caps_elem *cap) 2589 { 2590 dev_p->supported_sensors = le32_to_cpu(cap->number); 2591 2592 ice_debug(hw, ICE_DBG_INIT, 2593 "dev caps: supported sensors (bitmap) = 0x%x\n", 2594 dev_p->supported_sensors); 2595 } 2596 2597 /** 2598 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap 2599 * @hw: pointer to the HW struct 2600 * @dev_p: pointer to device capabilities structure 2601 * @cap: capability element to parse 2602 * 2603 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. 2604 */ 2605 static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw, 2606 struct ice_hw_dev_caps *dev_p, 2607 struct ice_aqc_list_caps_elem *cap) 2608 { 2609 dev_p->nac_topo.mode = le32_to_cpu(cap->number); 2610 dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; 2611 2612 dev_info(ice_hw_to_dev(hw), 2613 "PF is configured in %s mode with IP instance ID %d\n", 2614 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ? 2615 "primary" : "secondary", dev_p->nac_topo.id); 2616 2617 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", 2618 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); 2619 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", 2620 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); 2621 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", 2622 dev_p->nac_topo.id); 2623 } 2624 2625 /** 2626 * ice_parse_dev_caps - Parse device capabilities 2627 * @hw: pointer to the HW struct 2628 * @dev_p: pointer to device capabilities structure 2629 * @buf: buffer containing the device capability records 2630 * @cap_count: the number of capabilities 2631 * 2632 * Helper function to parse the device (0x000B) capabilities list. For 2633 * capabilities shared between device and function, this relies on 2634 * ice_parse_common_caps. 2635 * 2636 * Loop through the list of provided capabilities and extract the relevant 2637 * data into the device capabilities structure.
2638 */ 2639 static void 2640 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2641 void *buf, u32 cap_count) 2642 { 2643 struct ice_aqc_list_caps_elem *cap_resp; 2644 u32 i; 2645 2646 cap_resp = buf; 2647 2648 memset(dev_p, 0, sizeof(*dev_p)); 2649 2650 for (i = 0; i < cap_count; i++) { 2651 u16 cap = le16_to_cpu(cap_resp[i].cap); 2652 bool found; 2653 2654 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2655 &cap_resp[i], "dev caps"); 2656 2657 switch (cap) { 2658 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2659 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2660 break; 2661 case ICE_AQC_CAPS_VF: 2662 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2663 break; 2664 case ICE_AQC_CAPS_VSI: 2665 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2666 break; 2667 case ICE_AQC_CAPS_1588: 2668 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2669 break; 2670 case ICE_AQC_CAPS_FD: 2671 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2672 break; 2673 case ICE_AQC_CAPS_SENSOR_READING: 2674 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2675 break; 2676 case ICE_AQC_CAPS_NAC_TOPOLOGY: 2677 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); 2678 break; 2679 default: 2680 /* Don't list common capabilities as unknown */ 2681 if (!found) 2682 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2683 i, cap); 2684 break; 2685 } 2686 } 2687 2688 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2689 } 2690 2691 /** 2692 * ice_is_pf_c827 - check if pf contains c827 phy 2693 * @hw: pointer to the hw struct 2694 */ 2695 bool ice_is_pf_c827(struct ice_hw *hw) 2696 { 2697 struct ice_aqc_get_link_topo cmd = {}; 2698 u8 node_part_number; 2699 u16 node_handle; 2700 int status; 2701 2702 if (hw->mac_type != ICE_MAC_E810) 2703 return false; 2704 2705 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2706 return true; 2707 2708 cmd.addr.topo_params.node_type_ctx = 2709 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2710 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2711 cmd.addr.topo_params.index = 0; 2712 2713 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2714 &node_handle); 2715 2716 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2717 return false; 2718 2719 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2720 return true; 2721 2722 return false; 2723 } 2724 2725 /** 2726 * ice_is_phy_rclk_in_netlist 2727 * @hw: pointer to the hw struct 2728 * 2729 * Check if the PHY Recovered Clock device is present in the netlist 2730 */ 2731 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2732 { 2733 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2734 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2735 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2736 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2737 return false; 2738 2739 return true; 2740 } 2741 2742 /** 2743 * ice_is_clock_mux_in_netlist 2744 * @hw: pointer to the hw struct 2745 * 2746 * Check if the Clock Multiplexer device is present in the netlist 2747 */ 2748 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2749 { 2750 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2751 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2752 NULL)) 2753 return false; 2754 2755 return true; 2756 } 2757 2758 /** 2759 * ice_is_cgu_in_netlist - check for CGU presence 2760 * @hw: pointer to the hw struct 2761 * 2762 * Check if the Clock 
Generation Unit (CGU) device is present in the netlist. 2763 * Save the CGU part number in the hw structure for later use. 2764 * Return: 2765 * * true - cgu is present 2766 * * false - cgu is not present 2767 */ 2768 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2769 { 2770 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2771 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2772 NULL)) { 2773 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2774 return true; 2775 } else if (!ice_find_netlist_node(hw, 2776 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2777 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2778 NULL)) { 2779 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2780 return true; 2781 } 2782 2783 return false; 2784 } 2785 2786 /** 2787 * ice_is_gps_in_netlist 2788 * @hw: pointer to the hw struct 2789 * 2790 * Check if the GPS generic device is present in the netlist 2791 */ 2792 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2793 { 2794 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2795 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2796 return false; 2797 2798 return true; 2799 } 2800 2801 /** 2802 * ice_aq_list_caps - query function/device capabilities 2803 * @hw: pointer to the HW struct 2804 * @buf: a buffer to hold the capabilities 2805 * @buf_size: size of the buffer 2806 * @cap_count: if not NULL, set to the number of capabilities reported 2807 * @opc: capabilities type to discover, device or function 2808 * @cd: pointer to command details structure or NULL 2809 * 2810 * Get the function (0x000A) or device (0x000B) capabilities description from 2811 * firmware and store it in the buffer. 2812 * 2813 * If the cap_count pointer is not NULL, then it is set to the number of 2814 * capabilities firmware will report. Note that if the buffer size is too 2815 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2816 * cap_count will still be updated in this case. It is recommended that the 2817 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2818 * firmware could return) to avoid this. 2819 */ 2820 int 2821 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2822 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2823 { 2824 struct ice_aqc_list_caps *cmd; 2825 struct ice_aq_desc desc; 2826 int status; 2827 2828 cmd = &desc.params.get_cap; 2829 2830 if (opc != ice_aqc_opc_list_func_caps && 2831 opc != ice_aqc_opc_list_dev_caps) 2832 return -EINVAL; 2833 2834 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2835 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2836 2837 if (cap_count) 2838 *cap_count = le32_to_cpu(cmd->count); 2839 2840 return status; 2841 } 2842 2843 /** 2844 * ice_discover_dev_caps - Read and extract device capabilities 2845 * @hw: pointer to the hardware structure 2846 * @dev_caps: pointer to device capabilities structure 2847 * 2848 * Read the device capabilities and extract them into the dev_caps structure 2849 * for later use. 2850 */ 2851 int 2852 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2853 { 2854 u32 cap_count = 0; 2855 void *cbuf; 2856 int status; 2857 2858 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2859 if (!cbuf) 2860 return -ENOMEM; 2861 2862 /* Although the driver doesn't know the number of capabilities the 2863 * device will return, we can simply send a 4KB buffer, the maximum 2864 * possible size that firmware can return. 
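 * Assuming the 32-byte ice_aqc_list_caps_elem layout, that works out to
 * ICE_AQ_MAX_BUF_LEN / 32 = 4096 / 32 = 128 element slots.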
2865 */ 2866 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2867 2868 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2869 ice_aqc_opc_list_dev_caps, NULL); 2870 if (!status) 2871 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2872 kfree(cbuf); 2873 2874 return status; 2875 } 2876 2877 /** 2878 * ice_discover_func_caps - Read and extract function capabilities 2879 * @hw: pointer to the hardware structure 2880 * @func_caps: pointer to function capabilities structure 2881 * 2882 * Read the function capabilities and extract them into the func_caps structure 2883 * for later use. 2884 */ 2885 static int 2886 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2887 { 2888 u32 cap_count = 0; 2889 void *cbuf; 2890 int status; 2891 2892 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2893 if (!cbuf) 2894 return -ENOMEM; 2895 2896 /* Although the driver doesn't know the number of capabilities the 2897 * device will return, we can simply send a 4KB buffer, the maximum 2898 * possible size that firmware can return. 2899 */ 2900 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2901 2902 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2903 ice_aqc_opc_list_func_caps, NULL); 2904 if (!status) 2905 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2906 kfree(cbuf); 2907 2908 return status; 2909 } 2910 2911 /** 2912 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2913 * @hw: pointer to the hardware structure 2914 */ 2915 void ice_set_safe_mode_caps(struct ice_hw *hw) 2916 { 2917 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2918 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2919 struct ice_hw_common_caps cached_caps; 2920 u32 num_funcs; 2921 2922 /* cache some func_caps values that should be restored after memset */ 2923 cached_caps = func_caps->common_cap; 2924 2925 /* unset func capabilities */ 2926 memset(func_caps, 0, sizeof(*func_caps)); 2927 2928 #define ICE_RESTORE_FUNC_CAP(name) \ 2929 func_caps->common_cap.name = cached_caps.name 2930 2931 /* restore cached values */ 2932 ICE_RESTORE_FUNC_CAP(valid_functions); 2933 ICE_RESTORE_FUNC_CAP(txq_first_id); 2934 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2935 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2936 ICE_RESTORE_FUNC_CAP(max_mtu); 2937 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2938 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2939 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2940 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2941 2942 /* one Tx and one Rx queue in safe mode */ 2943 func_caps->common_cap.num_rxq = 1; 2944 func_caps->common_cap.num_txq = 1; 2945 2946 /* two MSIX vectors, one for traffic and one for misc causes */ 2947 func_caps->common_cap.num_msix_vectors = 2; 2948 func_caps->guar_num_vsi = 1; 2949 2950 /* cache some dev_caps values that should be restored after memset */ 2951 cached_caps = dev_caps->common_cap; 2952 num_funcs = dev_caps->num_funcs; 2953 2954 /* unset dev capabilities */ 2955 memset(dev_caps, 0, sizeof(*dev_caps)); 2956 2957 #define ICE_RESTORE_DEV_CAP(name) \ 2958 dev_caps->common_cap.name = cached_caps.name 2959 2960 /* restore cached values */ 2961 ICE_RESTORE_DEV_CAP(valid_functions); 2962 ICE_RESTORE_DEV_CAP(txq_first_id); 2963 ICE_RESTORE_DEV_CAP(rxq_first_id); 2964 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2965 ICE_RESTORE_DEV_CAP(max_mtu); 2966 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2967 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2968 
ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2969 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2970 dev_caps->num_funcs = num_funcs; 2971 2972 /* one Tx and one Rx queue per function in safe mode */ 2973 dev_caps->common_cap.num_rxq = num_funcs; 2974 dev_caps->common_cap.num_txq = num_funcs; 2975 2976 /* two MSIX vectors per function */ 2977 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2978 } 2979 2980 /** 2981 * ice_get_caps - get info about the HW 2982 * @hw: pointer to the hardware structure 2983 */ 2984 int ice_get_caps(struct ice_hw *hw) 2985 { 2986 int status; 2987 2988 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2989 if (status) 2990 return status; 2991 2992 return ice_discover_func_caps(hw, &hw->func_caps); 2993 } 2994 2995 /** 2996 * ice_aq_manage_mac_write - manage MAC address write command 2997 * @hw: pointer to the HW struct 2998 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2999 * @flags: flags to control write behavior 3000 * @cd: pointer to command details structure or NULL 3001 * 3002 * This function is used to write MAC address to the NVM (0x0108). 3003 */ 3004 int 3005 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3006 struct ice_sq_cd *cd) 3007 { 3008 struct ice_aqc_manage_mac_write *cmd; 3009 struct ice_aq_desc desc; 3010 3011 cmd = &desc.params.mac_write; 3012 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3013 3014 cmd->flags = flags; 3015 ether_addr_copy(cmd->mac_addr, mac_addr); 3016 3017 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3018 } 3019 3020 /** 3021 * ice_aq_clear_pxe_mode 3022 * @hw: pointer to the HW struct 3023 * 3024 * Tell the firmware that the driver is taking over from PXE (0x0110). 3025 */ 3026 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3027 { 3028 struct ice_aq_desc desc; 3029 3030 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3031 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3032 3033 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3034 } 3035 3036 /** 3037 * ice_clear_pxe_mode - clear pxe operations mode 3038 * @hw: pointer to the HW struct 3039 * 3040 * Make sure all PXE mode settings are cleared, including things 3041 * like descriptor fetch/write-back mode. 3042 */ 3043 void ice_clear_pxe_mode(struct ice_hw *hw) 3044 { 3045 if (ice_check_sq_alive(hw, &hw->adminq)) 3046 ice_aq_clear_pxe_mode(hw); 3047 } 3048 3049 /** 3050 * ice_aq_set_port_params - set physical port parameters. 3051 * @pi: pointer to the port info struct 3052 * @double_vlan: if set double VLAN is enabled 3053 * @cd: pointer to command details structure or NULL 3054 * 3055 * Set Physical port parameters (0x0203) 3056 */ 3057 int 3058 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3059 struct ice_sq_cd *cd) 3061 { 3062 struct ice_aqc_set_port_params *cmd; 3063 struct ice_hw *hw = pi->hw; 3064 struct ice_aq_desc desc; 3065 u16 cmd_flags = 0; 3066 3067 cmd = &desc.params.set_port_params; 3068 3069 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3070 if (double_vlan) 3071 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3072 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3073 3074 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3075 } 3076 3077 /** 3078 * ice_is_100m_speed_supported 3079 * @hw: pointer to the HW struct 3080 * 3081 * returns true if 100M speeds are supported by the device, 3082 * false otherwise.
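 *
 * Presumably only these SGMII/1GbE-capable variants can negotiate 100M;
 * a typical caller would use this to decide whether to advertise 100M
 * link modes at all.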
3083 */ 3084 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3085 { 3086 switch (hw->device_id) { 3087 case ICE_DEV_ID_E822C_SGMII: 3088 case ICE_DEV_ID_E822L_SGMII: 3089 case ICE_DEV_ID_E823L_1GBE: 3090 case ICE_DEV_ID_E823C_SGMII: 3091 return true; 3092 default: 3093 return false; 3094 } 3095 } 3096 3097 /** 3098 * ice_get_link_speed_based_on_phy_type - returns link speed 3099 * @phy_type_low: lower part of phy_type 3100 * @phy_type_high: higher part of phy_type 3101 * 3102 * This helper function will convert an entry in PHY type structure 3103 * [phy_type_low, phy_type_high] to its corresponding link speed. 3104 * Note: In the structure of [phy_type_low, phy_type_high], there should 3105 * be one bit set, as this function will convert one PHY type to its 3106 * speed. 3107 * 3108 * Return: 3109 * * PHY speed for recognized PHY type 3110 * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3111 * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3112 */ 3113 u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3114 { 3115 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3116 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3117 3118 switch (phy_type_low) { 3119 case ICE_PHY_TYPE_LOW_100BASE_TX: 3120 case ICE_PHY_TYPE_LOW_100M_SGMII: 3121 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3122 break; 3123 case ICE_PHY_TYPE_LOW_1000BASE_T: 3124 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3125 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3126 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3127 case ICE_PHY_TYPE_LOW_1G_SGMII: 3128 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3129 break; 3130 case ICE_PHY_TYPE_LOW_2500BASE_T: 3131 case ICE_PHY_TYPE_LOW_2500BASE_X: 3132 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3133 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3134 break; 3135 case ICE_PHY_TYPE_LOW_5GBASE_T: 3136 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3137 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3138 break; 3139 case ICE_PHY_TYPE_LOW_10GBASE_T: 3140 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3141 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3142 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3143 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3144 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3145 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3146 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3147 break; 3148 case ICE_PHY_TYPE_LOW_25GBASE_T: 3149 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3150 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3151 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3152 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3153 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3154 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3155 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3156 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3157 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3158 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3159 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3160 break; 3161 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3162 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3163 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3164 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3165 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3166 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3167 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3168 break; 3169 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3170 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3171 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3172 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3173 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3174 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3175 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3176 case ICE_PHY_TYPE_LOW_50G_AUI2: 3177 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3178 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3179 case 
ICE_PHY_TYPE_LOW_50GBASE_FR: 3180 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3181 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3182 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3183 case ICE_PHY_TYPE_LOW_50G_AUI1: 3184 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3185 break; 3186 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3187 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3188 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3189 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3190 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3191 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3192 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3193 case ICE_PHY_TYPE_LOW_100G_AUI4: 3194 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3195 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3196 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3197 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3198 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3199 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3200 break; 3201 default: 3202 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3203 break; 3204 } 3205 3206 switch (phy_type_high) { 3207 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 3208 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 3209 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 3210 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 3211 case ICE_PHY_TYPE_HIGH_100G_AUI2: 3212 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 3213 break; 3214 case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4: 3215 case ICE_PHY_TYPE_HIGH_200G_SR4: 3216 case ICE_PHY_TYPE_HIGH_200G_FR4: 3217 case ICE_PHY_TYPE_HIGH_200G_LR4: 3218 case ICE_PHY_TYPE_HIGH_200G_DR4: 3219 case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4: 3220 case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC: 3221 case ICE_PHY_TYPE_HIGH_200G_AUI4: 3222 speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB; 3223 break; 3224 default: 3225 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3226 break; 3227 } 3228 3229 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 3230 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3231 return ICE_AQ_LINK_SPEED_UNKNOWN; 3232 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3233 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 3234 return ICE_AQ_LINK_SPEED_UNKNOWN; 3235 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3236 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3237 return speed_phy_type_low; 3238 else 3239 return speed_phy_type_high; 3240 } 3241 3242 /** 3243 * ice_update_phy_type 3244 * @phy_type_low: pointer to the lower part of phy_type 3245 * @phy_type_high: pointer to the higher part of phy_type 3246 * @link_speeds_bitmap: targeted link speeds bitmap 3247 * 3248 * Note: For the link_speeds_bitmap layout, see the link_speed field of 3249 * struct ice_aqc_get_link_status. The caller may pass in a 3250 * link_speeds_bitmap that includes multiple speeds. 3251 * 3252 * Each entry in the [phy_type_low, phy_type_high] structure represents 3253 * a certain link speed. This helper function turns on bits in the 3254 * [phy_type_low, phy_type_high] structure based on the value of the 3255 * link_speeds_bitmap input parameter.
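 *
 * For illustration: link_speeds_bitmap = ICE_AQ_LINK_SPEED_10GB sets
 * every *phy_type_low bit whose PHY type resolves to 10G (e.g.
 * ICE_PHY_TYPE_LOW_10GBASE_T, ICE_PHY_TYPE_LOW_10GBASE_SR, ...) and,
 * since no phy_type_high entry maps to 10G, leaves *phy_type_high
 * untouched.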
3256 */ 3257 void 3258 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3259 u16 link_speeds_bitmap) 3260 { 3261 u64 pt_high; 3262 u64 pt_low; 3263 int index; 3264 u16 speed; 3265 3266 /* We first check with low part of phy_type */ 3267 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3268 pt_low = BIT_ULL(index); 3269 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3270 3271 if (link_speeds_bitmap & speed) 3272 *phy_type_low |= BIT_ULL(index); 3273 } 3274 3275 /* We then check with high part of phy_type */ 3276 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3277 pt_high = BIT_ULL(index); 3278 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3279 3280 if (link_speeds_bitmap & speed) 3281 *phy_type_high |= BIT_ULL(index); 3282 } 3283 } 3284 3285 /** 3286 * ice_aq_set_phy_cfg 3287 * @hw: pointer to the HW struct 3288 * @pi: port info structure of the interested logical port 3289 * @cfg: structure with PHY configuration data to be set 3290 * @cd: pointer to command details structure or NULL 3291 * 3292 * Set the various PHY configuration parameters supported on the Port. 3293 * One or more of the Set PHY config parameters may be ignored in an MFP 3294 * mode as the PF may not have the privilege to set some of the PHY Config 3295 * parameters. This status will be indicated by the command response (0x0601). 3296 */ 3297 int 3298 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3299 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3300 { 3301 struct ice_aq_desc desc; 3302 int status; 3303 3304 if (!cfg) 3305 return -EINVAL; 3306 3307 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3308 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3309 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3310 cfg->caps); 3311 3312 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3313 } 3314 3315 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3316 desc.params.set_phy.lport_num = pi->lport; 3317 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3318 3319 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3320 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3321 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3322 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3323 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3324 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3325 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3326 cfg->low_power_ctrl_an); 3327 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3328 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3329 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3330 cfg->link_fec_opt); 3331 3332 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3333 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3334 status = 0; 3335 3336 if (!status) 3337 pi->phy.curr_user_phy_cfg = *cfg; 3338 3339 return status; 3340 } 3341 3342 /** 3343 * ice_update_link_info - update status of the HW network link 3344 * @pi: port info structure of the interested logical port 3345 */ 3346 int ice_update_link_info(struct ice_port_info *pi) 3347 { 3348 struct ice_link_status *li; 3349 int status; 3350 3351 if (!pi) 3352 return -EINVAL; 3353 3354 li = &pi->phy.link_info; 3355 3356 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3357 if (status) 3358 return status; 3359 3360 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3361 struct ice_aqc_get_phy_caps_data *pcaps 
__free(kfree) = NULL; 3362 3363 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3364 if (!pcaps) 3365 return -ENOMEM; 3366 3367 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3368 pcaps, NULL); 3369 } 3370 3371 return status; 3372 } 3373 3374 /** 3375 * ice_aq_get_phy_equalization - function to read serdes equaliser 3376 * value from firmware using admin queue command. 3377 * @hw: pointer to the HW struct 3378 * @data_in: represents the serdes equalization parameter requested 3379 * @op_code: represents the serdes number and flag to represent tx or rx 3380 * @serdes_num: represents the serdes number 3381 * @output: pointer to the caller-supplied buffer to return serdes equaliser 3382 * 3383 * Return: non-zero status on error and 0 on success. 3384 */ 3385 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, 3386 u8 serdes_num, int *output) 3387 { 3388 struct ice_aqc_dnl_call_command *cmd; 3389 struct ice_aqc_dnl_call buf = {}; 3390 struct ice_aq_desc desc; 3391 int err; 3392 3393 buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); 3394 buf.sto.txrx_equa_reqs.op_code_serdes_sel = 3395 cpu_to_le16(op_code | (serdes_num & 0xF)); 3396 cmd = &desc.params.dnl_call; 3397 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); 3398 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | 3399 ICE_AQ_FLAG_RD | 3400 ICE_AQ_FLAG_SI); 3401 desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); 3402 cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); 3403 3404 err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), 3405 NULL); 3406 *output = err ? 0 : buf.sto.txrx_equa_resp.val; 3407 3408 return err; 3409 } 3410 3411 #define FEC_REG_PORT(port) { \ 3412 FEC_CORR_LOW_REG_PORT##port, \ 3413 FEC_CORR_HIGH_REG_PORT##port, \ 3414 FEC_UNCORR_LOW_REG_PORT##port, \ 3415 FEC_UNCORR_HIGH_REG_PORT##port, \ 3416 } 3417 3418 static const u32 fec_reg[][ICE_FEC_MAX] = { 3419 FEC_REG_PORT(0), 3420 FEC_REG_PORT(1), 3421 FEC_REG_PORT(2), 3422 FEC_REG_PORT(3) 3423 }; 3424 3425 /** 3426 * ice_aq_get_fec_stats - reads fec stats from phy 3427 * @hw: pointer to the HW struct 3428 * @pcs_quad: represents pcsquad of user input serdes 3429 * @pcs_port: represents the pcs port number part of above pcs quad 3430 * @fec_type: represents FEC stats type 3431 * @output: pointer to the caller-supplied buffer to return requested fec stats 3432 * 3433 * Return: non-zero status on error and 0 on success. 3434 */ 3435 int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, 3436 enum ice_fec_stats_types fec_type, u32 *output) 3437 { 3438 u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); 3439 struct ice_sbq_msg_input msg = {}; 3440 u32 receiver_id, reg_offset; 3441 int err; 3442 3443 if (pcs_port > 3) 3444 return -EINVAL; 3445 3446 reg_offset = fec_reg[pcs_port][fec_type]; 3447 3448 if (pcs_quad == 0) 3449 receiver_id = FEC_RECEIVER_ID_PCS0; 3450 else if (pcs_quad == 1) 3451 receiver_id = FEC_RECEIVER_ID_PCS1; 3452 else 3453 return -EINVAL; 3454 3455 msg.msg_addr_low = lower_16_bits(reg_offset); 3456 msg.msg_addr_high = receiver_id; 3457 msg.opcode = ice_sbq_msg_rd; 3458 msg.dest_dev = rmn_0; 3459 3460 err = ice_sbq_rw_reg(hw, &msg, flag); 3461 if (err) 3462 return err; 3463 3464 *output = msg.data; 3465 return 0; 3466 } 3467 3468 /** 3469 * ice_cache_phy_user_req 3470 * @pi: port information structure 3471 * @cache_data: PHY logging data 3472 * @cache_mode: PHY logging mode 3473 * 3474 * Log the user request on (FC, FEC, SPEED) for later use. 
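 *
 * A minimal caller sketch (this mirrors ice_cfg_phy_fc() below):
 *
 *	struct ice_phy_cache_mode_data cache_data;
 *
 *	cache_data.data.curr_user_fc_req = ICE_FC_FULL;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);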
3475 */ 3476 static void 3477 ice_cache_phy_user_req(struct ice_port_info *pi, 3478 struct ice_phy_cache_mode_data cache_data, 3479 enum ice_phy_cache_mode cache_mode) 3480 { 3481 if (!pi) 3482 return; 3483 3484 switch (cache_mode) { 3485 case ICE_FC_MODE: 3486 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3487 break; 3488 case ICE_SPEED_MODE: 3489 pi->phy.curr_user_speed_req = 3490 cache_data.data.curr_user_speed_req; 3491 break; 3492 case ICE_FEC_MODE: 3493 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3494 break; 3495 default: 3496 break; 3497 } 3498 } 3499 3500 /** 3501 * ice_caps_to_fc_mode 3502 * @caps: PHY capabilities 3503 * 3504 * Convert PHY FC capabilities to ice FC mode 3505 */ 3506 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3507 { 3508 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3509 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3510 return ICE_FC_FULL; 3511 3512 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3513 return ICE_FC_TX_PAUSE; 3514 3515 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3516 return ICE_FC_RX_PAUSE; 3517 3518 return ICE_FC_NONE; 3519 } 3520 3521 /** 3522 * ice_caps_to_fec_mode 3523 * @caps: PHY capabilities 3524 * @fec_options: Link FEC options 3525 * 3526 * Convert PHY FEC capabilities to ice FEC mode 3527 */ 3528 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3529 { 3530 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3531 return ICE_FEC_AUTO; 3532 3533 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3534 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3535 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3536 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3537 return ICE_FEC_BASER; 3538 3539 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3540 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3541 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3542 return ICE_FEC_RS; 3543 3544 return ICE_FEC_NONE; 3545 } 3546 3547 /** 3548 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3549 * @pi: port information structure 3550 * @cfg: PHY configuration data to set FC mode 3551 * @req_mode: FC mode to configure 3552 */ 3553 int 3554 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3555 enum ice_fc_mode req_mode) 3556 { 3557 struct ice_phy_cache_mode_data cache_data; 3558 u8 pause_mask = 0x0; 3559 3560 if (!pi || !cfg) 3561 return -EINVAL; 3562 3563 switch (req_mode) { 3564 case ICE_FC_FULL: 3565 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3566 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3567 break; 3568 case ICE_FC_RX_PAUSE: 3569 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3570 break; 3571 case ICE_FC_TX_PAUSE: 3572 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3573 break; 3574 default: 3575 break; 3576 } 3577 3578 /* clear the old pause settings */ 3579 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3580 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3581 3582 /* set the new capabilities */ 3583 cfg->caps |= pause_mask; 3584 3585 /* Cache user FC request */ 3586 cache_data.data.curr_user_fc_req = req_mode; 3587 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3588 3589 return 0; 3590 } 3591 3592 /** 3593 * ice_set_fc 3594 * @pi: port information structure 3595 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3596 * @ena_auto_link_update: enable automatic link update 3597 * 3598 * Set the requested flow control mode. 
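 *
 * A hypothetical caller sketch (names illustrative, error handling
 * abbreviated):
 *
 *	u8 aq_failures;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_RX_PAUSE;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, stage %u\n",
 *			  aq_failures);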
/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	return status;
}

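/*
 * Example (illustrative sketch, not part of the driver): request full flow
 * control and let FW restart the link so the setting takes effect. The
 * helper name is hypothetical. On a non-zero return, @aq_failures tells
 * the caller which AQ step (get, set, or link update) failed.
 */
static inline int ice_example_enable_full_fc(struct ice_port_info *pi)
{
	u8 aq_failures = 0;

	pi->fc.req_mode = ICE_FC_FULL;
	return ice_set_fc(pi, &aq_failures, true);
}
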
/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

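/*
 * Example (illustrative sketch, not part of the driver): build a
 * set-PHY-cfg request from reported abilities and check that the copy is
 * considered equivalent before tweaking individual fields. The helper
 * name is hypothetical.
 */
static inline bool ice_example_caps_roundtrip(struct ice_port_info *pi,
					      struct ice_aqc_get_phy_caps_data *pcaps)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* typically true right after the copy; only the non-common bits
	 * (AN mode, module qual, auto link update) are excluded from the
	 * comparison
	 */
	return ice_phy_caps_equals_cfg(pcaps, &cfg);
}
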
/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, AND in the BASE-R ability bits and OR in
		 * the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, AND in the RS ability bits and OR in
		 * the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND in the auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

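/*
 * Example (illustrative sketch, not part of the driver): request RS-FEC on
 * a port by editing a set-PHY-cfg buffer that was pre-populated via
 * ice_copy_phy_caps_to_cfg(). The helper name is hypothetical; the caller
 * still has to issue ice_aq_set_phy_cfg() for the change to apply.
 */
static inline int ice_example_request_rs_fec(struct ice_port_info *pi,
					     struct ice_aqc_set_phy_cfg_data *cfg)
{
	int status = ice_cfg_phy_fec(pi, cfg, ICE_FEC_RS);

	if (status)
		return status;

	/* have FW restart the link so the FEC change takes effect */
	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	return 0;
}
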
/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *                output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses port owned by the PF instead of lport;
 *               must be true when the PF owns more than 1 port
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}

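/*
 * Example (illustrative sketch, not part of the driver): query the port
 * options of the PF-owned port and report the active one. The helper name
 * is hypothetical, and ICE_AQC_PORT_OPT_MAX is assumed to be the
 * buffer-sizing constant used with this wrapper elsewhere in the driver.
 */
static inline int ice_example_show_active_option(struct ice_hw *hw)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = false;
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	/* lport 0 with lport_valid false: FW picks the PF-owned port */
	status = ice_aq_get_port_options(hw, options, &option_count, 0,
					 false, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return status;

	if (active_valid)
		ice_debug(hw, ICE_DBG_PHY, "active option pmd count: %u\n",
			  options[active_idx].pmd);
	return 0;
}
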
/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses port owned by the PF instead of lport;
 *               must be true when the PF owns more than 1 port
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be
 *	      zero.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	u16 i2c_bus_addr;
	int status;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
	if (write)
		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

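/*
 * Example (illustrative sketch, not part of the driver): read the SFF-8024
 * identifier byte (offset 0 of the module EEPROM at I2C address 0xA0) for
 * the PF-owned port. The helper name is hypothetical.
 */
static inline int ice_example_read_module_id(struct ice_hw *hw, u8 *id)
{
	/* lport = 0 with bit 8 clear: FW uses the PF-owned port; page 0,
	 * no page select, single-byte read
	 */
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, id, 1,
				 false, NULL);
}
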
static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
{
	switch (type) {
	case ICE_LUT_VSI:
		return ICE_LUT_VSI_SIZE;
	case ICE_LUT_GLOBAL:
		return ICE_LUT_GLOBAL_SIZE;
	case ICE_LUT_PF:
		return ICE_LUT_PF_SIZE;
	}
	WARN_ONCE(1, "incorrect type passed");
	return ICE_LUT_VSI_SIZE;
}

static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
{
	switch (size) {
	case ICE_LUT_VSI_SIZE:
		return ICE_AQC_LUT_SIZE_SMALL;
	case ICE_LUT_GLOBAL_SIZE:
		return ICE_AQC_LUT_SIZE_512;
	case ICE_LUT_PF_SIZE:
		return ICE_AQC_LUT_SIZE_2K;
	}
	WARN_ONCE(1, "incorrect size passed");
	return 0;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw,
			 struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
	enum ice_lut_type lut_type = params->lut_type;
	struct ice_aqc_get_set_rss_lut *desc_params;
	enum ice_aqc_lut_flags flags;
	enum ice_lut_size lut_size;
	struct ice_aq_desc desc;
	u8 *lut = params->lut;

	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	lut_size = ice_lut_type_to_size(lut_type);
	if (lut_size > params->lut_size)
		return -EINVAL;
	else if (set && lut_size != params->lut_size)
		return -EINVAL;

	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	if (set)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	desc_params = &desc.params.get_set_rss_lut;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	if (lut_type == ICE_LUT_GLOBAL)
		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
					  params->global_lut_id);

	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
	desc_params->flags = cpu_to_le16(flags);

	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *desc_params;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	desc_params = &desc.params.get_set_rss_key;
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

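/*
 * Example (illustrative sketch, not part of the driver): program a
 * VSI-sized RSS lookup table that spreads flows round-robin across
 * @num_qs queues (assumed non-zero). The helper name and the on-stack
 * table are hypothetical.
 */
static inline int ice_example_set_vsi_lut(struct ice_hw *hw, u16 vsi_handle,
					  u16 num_qs)
{
	struct ice_aq_get_set_rss_lut_params params = {};
	u8 lut[ICE_LUT_VSI_SIZE];
	u16 i;

	for (i = 0; i < ICE_LUT_VSI_SIZE; i++)
		lut[i] = i % num_qs;	/* round-robin queue assignment */

	params.vsi_handle = vsi_handle;
	params.lut_type = ICE_LUT_VSI;
	params.lut_size = ICE_LUT_VSI_SIZE;
	params.lut = lut;

	return ice_aq_set_rss_lut(hw, &params);
}
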
/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After the add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues.
 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx
 * queue flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 vmvf_and_timeout;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_cfg_lan_txq
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * Move/Configure LAN Tx queue (0x0C32)
 *
 * There is a better AQ command to use for moving nodes, so this one is
 * only used for configuring the node.
 */
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_txqs *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.cfg_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (!buf)
		return -EINVAL;

	cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
	cmd->num_qs = num_qs;
	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
	cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
	cmd->blocked_cgds = 0;

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
			  hw->adminq.sq_last_status);
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_pack_ctx_byte - write a byte to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
			      const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);

	src_byte = *from;
	src_byte <<= shift_width;
	src_byte &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_pack_ctx_word - write a word to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
			      const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word <<= shift_width;
	src_word &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_pack_ctx_dword - write a dword to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
			       const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword <<= shift_width;
	src_dword &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_pack_ctx_qword - write a qword to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
			       const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword <<= shift_width;
	src_qword &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: List of Rx context elements
 */
int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
		const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

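/*
 * Example (illustrative sketch, not part of the driver): pack a made-up
 * two-field context into a byte buffer with ice_set_ctx(). The struct and
 * table below are hypothetical, and ICE_CTX_STORE is assumed to be the
 * driver's table-building macro used for real tables such as
 * ice_tlan_ctx_info. @buf must be at least 2 zeroed bytes.
 */
struct ice_example_ctx {
	u8 enable;	/* 1 bit at LSB 0 of the packed buffer */
	u16 ring_len;	/* 13 bits starting at LSB 1 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	/* Field				  Width	LSB */
	ICE_CTX_STORE(ice_example_ctx, enable,	  1,	0),
	ICE_CTX_STORE(ice_example_ctx, ring_len, 13,	1),
	{ 0 }
};

static inline int ice_example_pack_ctx(struct ice_hw *hw, u8 *buf)
{
	struct ice_example_ctx ctx = { .enable = 1, .ring_len = 128 };

	/* bit 0 of buf receives @enable, bits 1-13 receive @ring_len */
	return ice_set_ctx(hw, (u8 *)&ctx, buf, ice_example_ctx_info);
}
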
/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
		q_ctx->q_teid = ICE_INVAL_TEID;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

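/*
 * Example (illustrative sketch, not part of the driver): size the LAN
 * scheduler for a VSI that only uses TC 0 with @num_txq queues. The
 * helper name is hypothetical; unused TCs simply get 0 entries.
 */
static inline int ice_example_cfg_tc0_lan(struct ice_port_info *pi,
					  u16 vsi_handle, u16 num_txq)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	max_lanqs[0] = num_txq;

	/* tc_bitmap BIT(0): only TC 0 is enabled for this VSI */
	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}
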
/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}

/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @dpll_state: current DPLL state
 * @config: current DPLL config
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = sign_extend64(*phase_offset, 47);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}

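/*
 * Example (illustrative sketch, not part of the driver): enable the
 * recovered clock output of the current port (as used for SyncE) and pick
 * up the frequency FW actually programmed. The helper name is
 * hypothetical.
 */
static inline int ice_example_ena_rclk(struct ice_hw *hw, u8 phy_output,
				       u32 *freq)
{
	/* @freq is in/out: requested frequency in, applied frequency out */
	return ice_aq_set_phy_rec_clk_out(hw, phy_output, true, freq);
}
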
/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin (in/out)
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: PHY output node handle
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading - get sensor reading
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 *
 * Get sensor reading (0x0632)
 * Return: 0 on success or negative value on failure.
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from the replay filter list head, if any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. This function must be called
 * with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}
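/* Illustrative usage sketch (not part of the driver): after a reset, the
 * main VSI must be replayed before any other VSI so that the pre-init step
 * runs first. "vsi_handles"/"num_vsis" below are hypothetical locals.
 *
 *	int i, err;
 *
 *	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	for (i = 0; !err && i < num_vsis; i++)
 *		if (vsi_handles[i] != ICE_MAIN_VSI_HANDLE)
 *			err = ice_replay_vsi(hw, vsi_handles[i]);
 */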
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from the replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* Device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
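/* Worked example (illustrative): with a 40-bit counter, a new read of 0x5
 * after a previous read of 0xFFFFFFFFFE means the hardware counter wrapped.
 * The roll-over branch computes
 *
 *	(0x5 + BIT_ULL(40)) - 0xFFFFFFFFFE = 0x10000000005 - 0xFFFFFFFFFE = 7
 *
 * so seven events are accumulated, rather than a huge bogus delta.
 */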
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* Device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read a value from an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] - data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	if (!data)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
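/* Illustrative usage sketch (not part of the driver): a caller encodes the
 * read length into @params with the same mask used above, e.g. to read two
 * bytes with the other parameter bits (repeated start, offset size, address
 * type) left clear:
 *
 *	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);
 *	u8 buf[2];
 *	int err;
 *
 *	err = ice_aq_read_i2c(hw, topo_addr, bus_addr, addr, params, buf,
 *			      NULL);
 */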
/**
 * ice_aq_write_i2c - write a value to an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *			    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver - check that the firmware API meets a minimum version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given version.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if firmware supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
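/* Illustrative usage sketch (not part of the driver): setting a topology
 * GPIO pin and reading it back. The handle and pin index below are
 * hypothetical locals obtained elsewhere (e.g. from a netlist lookup).
 *
 *	bool val;
 *	int err;
 *
 *	err = ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, true, NULL);
 *	if (!err)
 *		err = ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, &val,
 *				      NULL);
 */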
/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port.
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy type low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy type high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 *
 * Return: true if autoneg is enabled, false otherwise.
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
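/* Worked example (illustrative): the PHY-type loops above assemble four
 * little-endian 16-bit NVM words into one 64-bit mask, least significant
 * word first. For words 0x0004, 0x0000, 0x0000, 0x0000 read at i = 0..3:
 *
 *	phy_type_low = 0x0004 << 0 | 0x0000 << 16 | 0x0000 << 32
 *		     | 0x0000 << 48 = 0x4	(i.e. BIT(2))
 */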
/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to write
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB (0x0A08).
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if firmware supports report
 *				     default configuration
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
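/* Illustrative usage sketch (not part of the driver): callers would gate the
 * LLDP filter control on the capability check above, since the AQ command is
 * only available on E810 firmware that meets the minimum API version:
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		err = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 */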
/* Each of the indexes into the following array matches the speed of a return
 * value from the list of AQ returned speeds like the range:
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15) and maps to BIT(14) in this
 * array. The link_speed returned by the firmware is a 16-bit value, and the
 * array is indexed by [fls(speed) - 1]; out-of-range indexes are rejected by
 * ice_get_link_speed() below.
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
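/* Illustrative usage sketch (not part of this file): since the AQ reports
 * link speed as a one-hot bit, a caller converts it to an integer speed in
 * Mbps with fls(), e.g.
 *
 *	u32 mbps = ice_get_link_speed(fls(link_speed) - 1);
 *
 * where "link_speed" is a hypothetical u16 holding the AQ speed value. For
 * link_speed == 0, fls() returns 0, the u16 index wraps to 0xFFFF, and the
 * bounds check above maps it to a reported speed of 0.
 */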