// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830CC_BACKPLANE:
	case ICE_DEV_ID_E830CC_QSFP56:
	case ICE_DEV_ID_E830CC_SFP:
	case ICE_DEV_ID_E830CC_SFP_DD:
	case ICE_DEV_ID_E830C_BACKPLANE:
	case ICE_DEV_ID_E830_XXV_BACKPLANE:
	case ICE_DEV_ID_E830C_QSFP:
	case ICE_DEV_ID_E830_XXV_QSFP:
	case ICE_DEV_ID_E830C_SFP:
	case ICE_DEV_ID_E830_XXV_SFP:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}
/**
 * ice_is_e823 - check if the device is E823 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer, which should be interpreted as
 * a "manage_mac_read" response. The returned MAC addresses are also stored
 * in the HW struct (port.mac). ice_discover_dev_caps is expected to be
 * called before this function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
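/*
 * A minimal usage sketch for ice_aq_get_phy_caps() (hypothetical caller,
 * mirroring how ice_init_hw() below queries media capabilities): the
 * response buffer is caller-allocated and must be at least
 * sizeof(struct ice_aqc_get_phy_caps_data).
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int status;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return -ENOMEM;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     pcaps, NULL);
 *	if (!status)
 *		... use pcaps->caps, pcaps->phy_type_low ...
 *	kfree(pcaps);
 */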
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node - get a node handle from the netlist
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node - find a node handle in the netlist
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
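/*
 * Usage sketch for ice_find_netlist_node() (hypothetical caller; the GPIO
 * controller node type and PCA9575 part number constants are assumed to
 * come from ice_adminq_cmd.h): probe whether a given part is wired into
 * the netlist.
 *
 *	u16 handle;
 *
 *	if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL,
 *				   ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575,
 *				   &handle))
 *		... node present, handle is valid ...
 */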
/**
 * ice_is_media_cage_present - check if media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_get_link_status_datalen - get link status data length
 * @hw: pointer to the HW struct
 *
 * Returns datalength for the Get Link Status AQ command, which is bigger for
 * newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
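/*
 * Caller sketch (hypothetical): callers typically refresh the cached link
 * state through this helper only when pi->phy.get_link_info is set, since
 * a successful call clears that flag.
 *
 *	if (pi->phy.get_link_info) {
 *		int status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *
 *		if (status)
 *			return status;
 *	}
 *	... pi->phy.link_info now holds the current link status ...
 */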
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularity
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
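/*
 * Pairing sketch (hypothetical probe/remove flow): ice_init_hw() unrolls
 * its own partial initialization on failure, so ice_deinit_hw() must only
 * be called for a hw instance that initialized successfully.
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;	(do not call ice_deinit_hw() here)
 *	...
 *	ice_deinit_hw(hw);	(on remove/teardown)
 */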
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	/* The timeout check must match the loop bound above; comparing against
	 * ICE_PF_RESET_WAIT_COUNT alone would miss a real timeout.
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
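/*
 * Reset usage sketch (hypothetical): a PF reset returns with the control
 * queues still usable, while CORER/GLOBR re-enter PXE mode, which the
 * rebuild flow must clear again once the AQ interface is back up.
 *
 *	if (ice_reset(hw, ICE_RESET_CORER))
 *		... handle failure ...
 *	... recreate control queues ...
 *	ice_clear_pxe_mode(hw);
 */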
/**
 * ice_copy_rxq_ctx_to_hw - copy rxq context to HW registers
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx - write Rx queue context to HW
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
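/*
 * Packing note with a worked example: each ICE_CTX_STORE entry above gives
 * a field's width and LSB within the dense context image, so qlen (width
 * 13, LSB 89) occupies bits 89..101, spanning dwords 2 and 3 of the buffer
 * that ice_copy_rxq_ctx_to_hw() writes out. A minimal (hypothetical)
 * caller, with field values assumed from the usual ring setup:
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;		(base is in 128-byte units)
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	(data buffer, 128-byte units)
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */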
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
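/*
 * Sideband read sketch (hypothetical caller; the destination device and
 * the address derivation are assumed): for a read (opcode 0) the data
 * comes back in the completion and is returned through in->data.
 *
 *	struct ice_sbq_msg_input in = {
 *		.dest_dev = rmn_0,
 *		.opcode = 0,
 *		.msg_addr_low = lower_16_bits(addr),
 *		.msg_addr_high = upper_16_bits(addr),
 *	};
 *	int err = ice_sbq_rw_reg(hw, &in);
 *
 *	if (!err)
 *		val = in.data;
 */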
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd - check if the ATQ send should be retried
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all
	 * others must block until the package download completes and the
	 * Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_set_tx_topo:
	case ice_aqc_opc_get_tx_topo:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
/**
 * ice_aq_get_fw_ver - get the firmware version
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver - send the driver version to firmware
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown - shut down the AdminQ
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
1801 */ 1802 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1803 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1804 *timeout = le32_to_cpu(cmd_resp->timeout); 1805 return 0; 1806 } else if (le16_to_cpu(cmd_resp->status) == 1807 ICE_AQ_RES_GLBL_IN_PROG) { 1808 *timeout = le32_to_cpu(cmd_resp->timeout); 1809 return -EIO; 1810 } else if (le16_to_cpu(cmd_resp->status) == 1811 ICE_AQ_RES_GLBL_DONE) { 1812 return -EALREADY; 1813 } 1814 1815 /* invalid FW response, force a timeout immediately */ 1816 *timeout = 0; 1817 return -EIO; 1818 } 1819 1820 /* If the resource is held by some other driver, the command completes 1821 * with a busy return value and the timeout field indicates the maximum 1822 * time the current owner of the resource has to free it. 1823 */ 1824 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1825 *timeout = le32_to_cpu(cmd_resp->timeout); 1826 1827 return status; 1828 } 1829 1830 /** 1831 * ice_aq_release_res 1832 * @hw: pointer to the HW struct 1833 * @res: resource ID 1834 * @sdp_number: resource number 1835 * @cd: pointer to command details structure or NULL 1836 * 1837 * release common resource using the admin queue commands (0x0009) 1838 */ 1839 static int 1840 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1841 struct ice_sq_cd *cd) 1842 { 1843 struct ice_aqc_req_res *cmd; 1844 struct ice_aq_desc desc; 1845 1846 cmd = &desc.params.res_owner; 1847 1848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1849 1850 cmd->res_id = cpu_to_le16(res); 1851 cmd->res_number = cpu_to_le32(sdp_number); 1852 1853 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1854 } 1855 1856 /** 1857 * ice_acquire_res 1858 * @hw: pointer to the HW structure 1859 * @res: resource ID 1860 * @access: access type (read or write) 1861 * @timeout: timeout in milliseconds 1862 * 1863 * This function will attempt to acquire the ownership of a resource. 1864 */ 1865 int 1866 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1867 enum ice_aq_res_access_type access, u32 timeout) 1868 { 1869 #define ICE_RES_POLLING_DELAY_MS 10 1870 u32 delay = ICE_RES_POLLING_DELAY_MS; 1871 u32 time_left = timeout; 1872 int status; 1873 1874 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1875 1876 /* A return code of -EALREADY means that another driver has 1877 * previously acquired the resource and performed any necessary updates; 1878 * in this case the caller does not obtain the resource and has no 1879 * further work to do. 1880 */ 1881 if (status == -EALREADY) 1882 goto ice_acquire_res_exit; 1883 1884 if (status) 1885 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 1886 1887 /* If necessary, poll until the current lock owner timeouts */ 1888 timeout = time_left; 1889 while (status && timeout && time_left) { 1890 mdelay(delay); 1891 timeout = (timeout > delay) ? 
timeout - delay : 0; 1892 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1893 1894 if (status == -EALREADY) 1895 /* lock free, but no work to do */ 1896 break; 1897 1898 if (!status) 1899 /* lock acquired */ 1900 break; 1901 } 1902 if (status && status != -EALREADY) 1903 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1904 1905 ice_acquire_res_exit: 1906 if (status == -EALREADY) { 1907 if (access == ICE_RES_WRITE) 1908 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1909 else 1910 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1911 } 1912 return status; 1913 } 1914 1915 /** 1916 * ice_release_res 1917 * @hw: pointer to the HW structure 1918 * @res: resource ID 1919 * 1920 * This function will release a resource using the proper Admin Command. 1921 */ 1922 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1923 { 1924 unsigned long timeout; 1925 int status; 1926 1927 /* there are some rare cases when trying to release the resource 1928 * results in an admin queue timeout, so handle them correctly 1929 */ 1930 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1931 do { 1932 status = ice_aq_release_res(hw, res, 0, NULL); 1933 if (status != -EIO) 1934 break; 1935 usleep_range(1000, 2000); 1936 } while (time_before(jiffies, timeout)); 1937 } 1938 1939 /** 1940 * ice_aq_alloc_free_res - command to allocate/free resources 1941 * @hw: pointer to the HW struct 1942 * @buf: Indirect buffer to hold data parameters and response 1943 * @buf_size: size of buffer for indirect commands 1944 * @opc: pass in the command opcode 1945 * 1946 * Helper function to allocate/free resources using the admin queue commands 1947 */ 1948 int ice_aq_alloc_free_res(struct ice_hw *hw, 1949 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1950 enum ice_adminq_opc opc) 1951 { 1952 struct ice_aqc_alloc_free_res_cmd *cmd; 1953 struct ice_aq_desc desc; 1954 1955 cmd = &desc.params.sw_res_ctrl; 1956 1957 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1958 return -EINVAL; 1959 1960 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1961 1962 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1963 1964 cmd->num_entries = cpu_to_le16(1); 1965 1966 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1967 } 1968 1969 /** 1970 * ice_alloc_hw_res - allocate resource 1971 * @hw: pointer to the HW struct 1972 * @type: type of resource 1973 * @num: number of resources to allocate 1974 * @btm: allocate from bottom 1975 * @res: pointer to array that will receive the resources 1976 */ 1977 int 1978 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 1979 { 1980 struct ice_aqc_alloc_free_res_elem *buf; 1981 u16 buf_len; 1982 int status; 1983 1984 buf_len = struct_size(buf, elem, num); 1985 buf = kzalloc(buf_len, GFP_KERNEL); 1986 if (!buf) 1987 return -ENOMEM; 1988 1989 /* Prepare buffer to allocate resource. 
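 * On success, firmware returns the allocated resource IDs in buf->elem[];
 * they are copied back to the caller's @res array below.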
*/ 1990 buf->num_elems = cpu_to_le16(num); 1991 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1992 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1993 if (btm) 1994 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1995 1996 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 1997 if (status) 1998 goto ice_alloc_res_exit; 1999 2000 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2001 2002 ice_alloc_res_exit: 2003 kfree(buf); 2004 return status; 2005 } 2006 2007 /** 2008 * ice_free_hw_res - free allocated HW resource 2009 * @hw: pointer to the HW struct 2010 * @type: type of resource to free 2011 * @num: number of resources 2012 * @res: pointer to array that contains the resources to free 2013 */ 2014 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2015 { 2016 struct ice_aqc_alloc_free_res_elem *buf; 2017 u16 buf_len; 2018 int status; 2019 2020 buf_len = struct_size(buf, elem, num); 2021 buf = kzalloc(buf_len, GFP_KERNEL); 2022 if (!buf) 2023 return -ENOMEM; 2024 2025 /* Prepare buffer to free resource. */ 2026 buf->num_elems = cpu_to_le16(num); 2027 buf->res_type = cpu_to_le16(type); 2028 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2029 2030 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2031 if (status) 2032 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2033 2034 kfree(buf); 2035 return status; 2036 } 2037 2038 /** 2039 * ice_get_num_per_func - determine number of resources per PF 2040 * @hw: pointer to the HW structure 2041 * @max: value to be evenly split between each PF 2042 * 2043 * Determine the number of valid functions by going through the bitmap returned 2044 * from parsing capabilities and use this to calculate the number of resources 2045 * per PF based on the max value passed in. 2046 */ 2047 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2048 { 2049 u8 funcs; 2050 2051 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2052 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2053 ICE_CAPS_VALID_FUNCS_M); 2054 2055 if (!funcs) 2056 return 0; 2057 2058 return max / funcs; 2059 } 2060 2061 /** 2062 * ice_parse_common_caps - parse common device/function capabilities 2063 * @hw: pointer to the HW struct 2064 * @caps: pointer to common capabilities structure 2065 * @elem: the capability element to parse 2066 * @prefix: message prefix for tracing capabilities 2067 * 2068 * Given a capability element, extract relevant details into the common 2069 * capability structure. 2070 * 2071 * Returns: true if the capability matches one of the common capability ids, 2072 * false otherwise. 
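 *
 * Callers (ice_parse_func_caps() and ice_parse_dev_caps() below) run each
 * element through this function first and treat an element as an unknown
 * capability only when neither this function nor their own switch
 * recognizes it.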
2073 */ 2074 static bool 2075 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2076 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2077 { 2078 u32 logical_id = le32_to_cpu(elem->logical_id); 2079 u32 phys_id = le32_to_cpu(elem->phys_id); 2080 u32 number = le32_to_cpu(elem->number); 2081 u16 cap = le16_to_cpu(elem->cap); 2082 bool found = true; 2083 2084 switch (cap) { 2085 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2086 caps->valid_functions = number; 2087 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2088 caps->valid_functions); 2089 break; 2090 case ICE_AQC_CAPS_SRIOV: 2091 caps->sr_iov_1_1 = (number == 1); 2092 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2093 caps->sr_iov_1_1); 2094 break; 2095 case ICE_AQC_CAPS_DCB: 2096 caps->dcb = (number == 1); 2097 caps->active_tc_bitmap = logical_id; 2098 caps->maxtc = phys_id; 2099 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2100 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2101 caps->active_tc_bitmap); 2102 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2103 break; 2104 case ICE_AQC_CAPS_RSS: 2105 caps->rss_table_size = number; 2106 caps->rss_table_entry_width = logical_id; 2107 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2108 caps->rss_table_size); 2109 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2110 caps->rss_table_entry_width); 2111 break; 2112 case ICE_AQC_CAPS_RXQS: 2113 caps->num_rxq = number; 2114 caps->rxq_first_id = phys_id; 2115 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2116 caps->num_rxq); 2117 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2118 caps->rxq_first_id); 2119 break; 2120 case ICE_AQC_CAPS_TXQS: 2121 caps->num_txq = number; 2122 caps->txq_first_id = phys_id; 2123 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2124 caps->num_txq); 2125 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2126 caps->txq_first_id); 2127 break; 2128 case ICE_AQC_CAPS_MSIX: 2129 caps->num_msix_vectors = number; 2130 caps->msix_vector_first_id = phys_id; 2131 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2132 caps->num_msix_vectors); 2133 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2134 caps->msix_vector_first_id); 2135 break; 2136 case ICE_AQC_CAPS_PENDING_NVM_VER: 2137 caps->nvm_update_pending_nvm = true; 2138 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2139 break; 2140 case ICE_AQC_CAPS_PENDING_OROM_VER: 2141 caps->nvm_update_pending_orom = true; 2142 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2143 break; 2144 case ICE_AQC_CAPS_PENDING_NET_VER: 2145 caps->nvm_update_pending_netlist = true; 2146 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2147 break; 2148 case ICE_AQC_CAPS_NVM_MGMT: 2149 caps->nvm_unified_update = 2150 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2151 true : false; 2152 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2153 caps->nvm_unified_update); 2154 break; 2155 case ICE_AQC_CAPS_RDMA: 2156 caps->rdma = (number == 1); 2157 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2158 break; 2159 case ICE_AQC_CAPS_MAX_MTU: 2160 caps->max_mtu = number; 2161 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2162 prefix, caps->max_mtu); 2163 break; 2164 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2165 caps->pcie_reset_avoidance = (number > 0); 2166 ice_debug(hw, ICE_DBG_INIT, 2167 "%s: pcie_reset_avoidance = %d\n", prefix, 2168 caps->pcie_reset_avoidance); 2169 break; 2170 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2171 caps->reset_restrict_support = (number == 1); 2172 ice_debug(hw, ICE_DBG_INIT, 2173 "%s: reset_restrict_support = %d\n", prefix, 2174 caps->reset_restrict_support); 2175 break; 2176 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2177 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2178 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2179 prefix, caps->roce_lag); 2180 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2181 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2182 prefix, caps->sriov_lag); 2183 break; 2184 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2185 caps->tx_sched_topo_comp_mode_en = (number == 1); 2186 break; 2187 default: 2188 /* Not one of the recognized common capabilities */ 2189 found = false; 2190 } 2191 2192 return found; 2193 } 2194 2195 /** 2196 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2197 * @hw: pointer to the HW structure 2198 * @caps: pointer to capabilities structure to fix 2199 * 2200 * Re-calculate the capabilities that are dependent on the number of physical 2201 * ports; i.e. some features are not supported or function differently on 2202 * devices with more than 4 ports. 2203 */ 2204 static void 2205 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2206 { 2207 /* This assumes device capabilities are always scanned before function 2208 * capabilities during the initialization flow. 2209 */ 2210 if (hw->dev_caps.num_funcs > 4) { 2211 /* Max 4 TCs per port */ 2212 caps->maxtc = 4; 2213 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2214 caps->maxtc); 2215 if (caps->rdma) { 2216 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2217 caps->rdma = 0; 2218 } 2219 2220 /* print message only when processing device capabilities 2221 * during initialization. 2222 */ 2223 if (caps == &hw->dev_caps.common_cap) 2224 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2225 } 2226 } 2227 2228 /** 2229 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2230 * @hw: pointer to the HW struct 2231 * @func_p: pointer to function capabilities structure 2232 * @cap: pointer to the capability element to parse 2233 * 2234 * Extract function capabilities for ICE_AQC_CAPS_VF. 
2235 */ 2236 static void 2237 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2238 struct ice_aqc_list_caps_elem *cap) 2239 { 2240 u32 logical_id = le32_to_cpu(cap->logical_id); 2241 u32 number = le32_to_cpu(cap->number); 2242 2243 func_p->num_allocd_vfs = number; 2244 func_p->vf_base_id = logical_id; 2245 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2246 func_p->num_allocd_vfs); 2247 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2248 func_p->vf_base_id); 2249 } 2250 2251 /** 2252 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2253 * @hw: pointer to the HW struct 2254 * @func_p: pointer to function capabilities structure 2255 * @cap: pointer to the capability element to parse 2256 * 2257 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2258 */ 2259 static void 2260 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2261 struct ice_aqc_list_caps_elem *cap) 2262 { 2263 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2264 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2265 le32_to_cpu(cap->number)); 2266 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2267 func_p->guar_num_vsi); 2268 } 2269 2270 /** 2271 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2272 * @hw: pointer to the HW struct 2273 * @func_p: pointer to function capabilities structure 2274 * @cap: pointer to the capability element to parse 2275 * 2276 * Extract function capabilities for ICE_AQC_CAPS_1588. 2277 */ 2278 static void 2279 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2280 struct ice_aqc_list_caps_elem *cap) 2281 { 2282 struct ice_ts_func_info *info = &func_p->ts_func_info; 2283 u32 number = le32_to_cpu(cap->number); 2284 2285 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2286 func_p->common_cap.ieee_1588 = info->ena; 2287 2288 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2289 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2290 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2291 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2292 2293 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2294 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2295 2296 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2297 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2298 } else { 2299 /* Unknown clock frequency, so assume a (probably incorrect) 2300 * default to avoid out-of-bounds look ups of frequency 2301 * related information. 
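 * The ice_debug() call below still records the value firmware actually
 * reported before the 25 MHz fallback is applied.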
		 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, gsize, bsize;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
		bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
		break;
	case ICE_MAC_E810:
	default:
		gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
		bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
	}
	func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
	func_p->fd_fltr_best_effort = bsize;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
2369 */ 2370 static void 2371 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2372 void *buf, u32 cap_count) 2373 { 2374 struct ice_aqc_list_caps_elem *cap_resp; 2375 u32 i; 2376 2377 cap_resp = buf; 2378 2379 memset(func_p, 0, sizeof(*func_p)); 2380 2381 for (i = 0; i < cap_count; i++) { 2382 u16 cap = le16_to_cpu(cap_resp[i].cap); 2383 bool found; 2384 2385 found = ice_parse_common_caps(hw, &func_p->common_cap, 2386 &cap_resp[i], "func caps"); 2387 2388 switch (cap) { 2389 case ICE_AQC_CAPS_VF: 2390 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2391 break; 2392 case ICE_AQC_CAPS_VSI: 2393 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2394 break; 2395 case ICE_AQC_CAPS_1588: 2396 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2397 break; 2398 case ICE_AQC_CAPS_FD: 2399 ice_parse_fdir_func_caps(hw, func_p); 2400 break; 2401 default: 2402 /* Don't list common capabilities as unknown */ 2403 if (!found) 2404 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2405 i, cap); 2406 break; 2407 } 2408 } 2409 2410 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2411 } 2412 2413 /** 2414 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2415 * @hw: pointer to the HW struct 2416 * @dev_p: pointer to device capabilities structure 2417 * @cap: capability element to parse 2418 * 2419 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2420 */ 2421 static void 2422 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2423 struct ice_aqc_list_caps_elem *cap) 2424 { 2425 u32 number = le32_to_cpu(cap->number); 2426 2427 dev_p->num_funcs = hweight32(number); 2428 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2429 dev_p->num_funcs); 2430 } 2431 2432 /** 2433 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2434 * @hw: pointer to the HW struct 2435 * @dev_p: pointer to device capabilities structure 2436 * @cap: capability element to parse 2437 * 2438 * Parse ICE_AQC_CAPS_VF for device capabilities. 2439 */ 2440 static void 2441 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2442 struct ice_aqc_list_caps_elem *cap) 2443 { 2444 u32 number = le32_to_cpu(cap->number); 2445 2446 dev_p->num_vfs_exposed = number; 2447 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2448 dev_p->num_vfs_exposed); 2449 } 2450 2451 /** 2452 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2453 * @hw: pointer to the HW struct 2454 * @dev_p: pointer to device capabilities structure 2455 * @cap: capability element to parse 2456 * 2457 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2458 */ 2459 static void 2460 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2461 struct ice_aqc_list_caps_elem *cap) 2462 { 2463 u32 number = le32_to_cpu(cap->number); 2464 2465 dev_p->num_vsi_allocd_to_host = number; 2466 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2467 dev_p->num_vsi_allocd_to_host); 2468 } 2469 2470 /** 2471 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2472 * @hw: pointer to the HW struct 2473 * @dev_p: pointer to device capabilities structure 2474 * @cap: capability element to parse 2475 * 2476 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
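 *
 * The capability element packs three words: cap->number carries the timer
 * enable and ownership bits, cap->logical_id the bitmap of 1588-enabled
 * ports, and cap->phys_id the timer ownership map (see the field
 * extraction below).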
2477 */ 2478 static void 2479 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2480 struct ice_aqc_list_caps_elem *cap) 2481 { 2482 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2483 u32 logical_id = le32_to_cpu(cap->logical_id); 2484 u32 phys_id = le32_to_cpu(cap->phys_id); 2485 u32 number = le32_to_cpu(cap->number); 2486 2487 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2488 dev_p->common_cap.ieee_1588 = info->ena; 2489 2490 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2491 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2492 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2493 2494 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2495 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2496 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2497 2498 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2499 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2500 2501 info->ena_ports = logical_id; 2502 info->tmr_own_map = phys_id; 2503 2504 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2505 dev_p->common_cap.ieee_1588); 2506 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2507 info->tmr0_owner); 2508 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2509 info->tmr0_owned); 2510 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2511 info->tmr0_ena); 2512 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2513 info->tmr1_owner); 2514 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2515 info->tmr1_owned); 2516 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2517 info->tmr1_ena); 2518 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2519 info->ts_ll_read); 2520 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2521 info->ts_ll_int_read); 2522 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2523 info->ena_ports); 2524 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2525 info->tmr_own_map); 2526 } 2527 2528 /** 2529 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2530 * @hw: pointer to the HW struct 2531 * @dev_p: pointer to device capabilities structure 2532 * @cap: capability element to parse 2533 * 2534 * Parse ICE_AQC_CAPS_FD for device capabilities. 2535 */ 2536 static void 2537 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2538 struct ice_aqc_list_caps_elem *cap) 2539 { 2540 u32 number = le32_to_cpu(cap->number); 2541 2542 dev_p->num_flow_director_fltr = number; 2543 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2544 dev_p->num_flow_director_fltr); 2545 } 2546 2547 /** 2548 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2549 * @hw: pointer to the HW struct 2550 * @dev_p: pointer to device capabilities structure 2551 * @cap: capability element to parse 2552 * 2553 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2554 * enabled sensors. 
2555 */ 2556 static void 2557 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2558 struct ice_aqc_list_caps_elem *cap) 2559 { 2560 dev_p->supported_sensors = le32_to_cpu(cap->number); 2561 2562 ice_debug(hw, ICE_DBG_INIT, 2563 "dev caps: supported sensors (bitmap) = 0x%x\n", 2564 dev_p->supported_sensors); 2565 } 2566 2567 /** 2568 * ice_parse_dev_caps - Parse device capabilities 2569 * @hw: pointer to the HW struct 2570 * @dev_p: pointer to device capabilities structure 2571 * @buf: buffer containing the device capability records 2572 * @cap_count: the number of capabilities 2573 * 2574 * Helper device to parse device (0x000B) capabilities list. For 2575 * capabilities shared between device and function, this relies on 2576 * ice_parse_common_caps. 2577 * 2578 * Loop through the list of provided capabilities and extract the relevant 2579 * data into the device capabilities structured. 2580 */ 2581 static void 2582 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2583 void *buf, u32 cap_count) 2584 { 2585 struct ice_aqc_list_caps_elem *cap_resp; 2586 u32 i; 2587 2588 cap_resp = buf; 2589 2590 memset(dev_p, 0, sizeof(*dev_p)); 2591 2592 for (i = 0; i < cap_count; i++) { 2593 u16 cap = le16_to_cpu(cap_resp[i].cap); 2594 bool found; 2595 2596 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2597 &cap_resp[i], "dev caps"); 2598 2599 switch (cap) { 2600 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2601 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2602 break; 2603 case ICE_AQC_CAPS_VF: 2604 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2605 break; 2606 case ICE_AQC_CAPS_VSI: 2607 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2608 break; 2609 case ICE_AQC_CAPS_1588: 2610 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2611 break; 2612 case ICE_AQC_CAPS_FD: 2613 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2614 break; 2615 case ICE_AQC_CAPS_SENSOR_READING: 2616 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2617 break; 2618 default: 2619 /* Don't list common capabilities as unknown */ 2620 if (!found) 2621 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2622 i, cap); 2623 break; 2624 } 2625 } 2626 2627 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2628 } 2629 2630 /** 2631 * ice_is_pf_c827 - check if pf contains c827 phy 2632 * @hw: pointer to the hw struct 2633 */ 2634 bool ice_is_pf_c827(struct ice_hw *hw) 2635 { 2636 struct ice_aqc_get_link_topo cmd = {}; 2637 u8 node_part_number; 2638 u16 node_handle; 2639 int status; 2640 2641 if (hw->mac_type != ICE_MAC_E810) 2642 return false; 2643 2644 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2645 return true; 2646 2647 cmd.addr.topo_params.node_type_ctx = 2648 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2649 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2650 cmd.addr.topo_params.index = 0; 2651 2652 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2653 &node_handle); 2654 2655 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2656 return false; 2657 2658 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2659 return true; 2660 2661 return false; 2662 } 2663 2664 /** 2665 * ice_is_phy_rclk_in_netlist 2666 * @hw: pointer to the hw struct 2667 * 2668 * Check if the PHY Recovered Clock device is present in the netlist 2669 */ 2670 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2671 { 2672 if 
(ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2673 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2674 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2675 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2676 return false; 2677 2678 return true; 2679 } 2680 2681 /** 2682 * ice_is_clock_mux_in_netlist 2683 * @hw: pointer to the hw struct 2684 * 2685 * Check if the Clock Multiplexer device is present in the netlist 2686 */ 2687 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2688 { 2689 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2690 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2691 NULL)) 2692 return false; 2693 2694 return true; 2695 } 2696 2697 /** 2698 * ice_is_cgu_in_netlist - check for CGU presence 2699 * @hw: pointer to the hw struct 2700 * 2701 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2702 * Save the CGU part number in the hw structure for later use. 2703 * Return: 2704 * * true - cgu is present 2705 * * false - cgu is not present 2706 */ 2707 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2708 { 2709 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2710 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2711 NULL)) { 2712 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2713 return true; 2714 } else if (!ice_find_netlist_node(hw, 2715 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2716 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2717 NULL)) { 2718 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2719 return true; 2720 } 2721 2722 return false; 2723 } 2724 2725 /** 2726 * ice_is_gps_in_netlist 2727 * @hw: pointer to the hw struct 2728 * 2729 * Check if the GPS generic device is present in the netlist 2730 */ 2731 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2732 { 2733 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2734 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2735 return false; 2736 2737 return true; 2738 } 2739 2740 /** 2741 * ice_aq_list_caps - query function/device capabilities 2742 * @hw: pointer to the HW struct 2743 * @buf: a buffer to hold the capabilities 2744 * @buf_size: size of the buffer 2745 * @cap_count: if not NULL, set to the number of capabilities reported 2746 * @opc: capabilities type to discover, device or function 2747 * @cd: pointer to command details structure or NULL 2748 * 2749 * Get the function (0x000A) or device (0x000B) capabilities description from 2750 * firmware and store it in the buffer. 2751 * 2752 * If the cap_count pointer is not NULL, then it is set to the number of 2753 * capabilities firmware will report. Note that if the buffer size is too 2754 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2755 * cap_count will still be updated in this case. It is recommended that the 2756 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2757 * firmware could return) to avoid this. 
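 *
 * A minimal calling sketch (declarations elided), mirroring
 * ice_discover_dev_caps() below:
 *
 *	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *	if (buf)
 *		status = ice_aq_list_caps(hw, buf, ICE_AQ_MAX_BUF_LEN,
 *					  &cap_count,
 *					  ice_aqc_opc_list_dev_caps, NULL);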
2758 */ 2759 int 2760 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2761 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2762 { 2763 struct ice_aqc_list_caps *cmd; 2764 struct ice_aq_desc desc; 2765 int status; 2766 2767 cmd = &desc.params.get_cap; 2768 2769 if (opc != ice_aqc_opc_list_func_caps && 2770 opc != ice_aqc_opc_list_dev_caps) 2771 return -EINVAL; 2772 2773 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2774 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2775 2776 if (cap_count) 2777 *cap_count = le32_to_cpu(cmd->count); 2778 2779 return status; 2780 } 2781 2782 /** 2783 * ice_discover_dev_caps - Read and extract device capabilities 2784 * @hw: pointer to the hardware structure 2785 * @dev_caps: pointer to device capabilities structure 2786 * 2787 * Read the device capabilities and extract them into the dev_caps structure 2788 * for later use. 2789 */ 2790 int 2791 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2792 { 2793 u32 cap_count = 0; 2794 void *cbuf; 2795 int status; 2796 2797 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2798 if (!cbuf) 2799 return -ENOMEM; 2800 2801 /* Although the driver doesn't know the number of capabilities the 2802 * device will return, we can simply send a 4KB buffer, the maximum 2803 * possible size that firmware can return. 2804 */ 2805 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2806 2807 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2808 ice_aqc_opc_list_dev_caps, NULL); 2809 if (!status) 2810 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2811 kfree(cbuf); 2812 2813 return status; 2814 } 2815 2816 /** 2817 * ice_discover_func_caps - Read and extract function capabilities 2818 * @hw: pointer to the hardware structure 2819 * @func_caps: pointer to function capabilities structure 2820 * 2821 * Read the function capabilities and extract them into the func_caps structure 2822 * for later use. 2823 */ 2824 static int 2825 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2826 { 2827 u32 cap_count = 0; 2828 void *cbuf; 2829 int status; 2830 2831 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2832 if (!cbuf) 2833 return -ENOMEM; 2834 2835 /* Although the driver doesn't know the number of capabilities the 2836 * device will return, we can simply send a 4KB buffer, the maximum 2837 * possible size that firmware can return. 
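 * cap_count is primed below with the element capacity of that buffer, but
 * note that ice_aq_list_caps() overwrites it with the count firmware
 * actually reports.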
2838 */ 2839 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2840 2841 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2842 ice_aqc_opc_list_func_caps, NULL); 2843 if (!status) 2844 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2845 kfree(cbuf); 2846 2847 return status; 2848 } 2849 2850 /** 2851 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2852 * @hw: pointer to the hardware structure 2853 */ 2854 void ice_set_safe_mode_caps(struct ice_hw *hw) 2855 { 2856 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2857 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2858 struct ice_hw_common_caps cached_caps; 2859 u32 num_funcs; 2860 2861 /* cache some func_caps values that should be restored after memset */ 2862 cached_caps = func_caps->common_cap; 2863 2864 /* unset func capabilities */ 2865 memset(func_caps, 0, sizeof(*func_caps)); 2866 2867 #define ICE_RESTORE_FUNC_CAP(name) \ 2868 func_caps->common_cap.name = cached_caps.name 2869 2870 /* restore cached values */ 2871 ICE_RESTORE_FUNC_CAP(valid_functions); 2872 ICE_RESTORE_FUNC_CAP(txq_first_id); 2873 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2874 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2875 ICE_RESTORE_FUNC_CAP(max_mtu); 2876 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2877 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2878 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2879 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2880 2881 /* one Tx and one Rx queue in safe mode */ 2882 func_caps->common_cap.num_rxq = 1; 2883 func_caps->common_cap.num_txq = 1; 2884 2885 /* two MSIX vectors, one for traffic and one for misc causes */ 2886 func_caps->common_cap.num_msix_vectors = 2; 2887 func_caps->guar_num_vsi = 1; 2888 2889 /* cache some dev_caps values that should be restored after memset */ 2890 cached_caps = dev_caps->common_cap; 2891 num_funcs = dev_caps->num_funcs; 2892 2893 /* unset dev capabilities */ 2894 memset(dev_caps, 0, sizeof(*dev_caps)); 2895 2896 #define ICE_RESTORE_DEV_CAP(name) \ 2897 dev_caps->common_cap.name = cached_caps.name 2898 2899 /* restore cached values */ 2900 ICE_RESTORE_DEV_CAP(valid_functions); 2901 ICE_RESTORE_DEV_CAP(txq_first_id); 2902 ICE_RESTORE_DEV_CAP(rxq_first_id); 2903 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2904 ICE_RESTORE_DEV_CAP(max_mtu); 2905 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2906 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2907 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2908 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2909 dev_caps->num_funcs = num_funcs; 2910 2911 /* one Tx and one Rx queue per function in safe mode */ 2912 dev_caps->common_cap.num_rxq = num_funcs; 2913 dev_caps->common_cap.num_txq = num_funcs; 2914 2915 /* two MSIX vectors per function */ 2916 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2917 } 2918 2919 /** 2920 * ice_get_caps - get info about the HW 2921 * @hw: pointer to the hardware structure 2922 */ 2923 int ice_get_caps(struct ice_hw *hw) 2924 { 2925 int status; 2926 2927 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2928 if (status) 2929 return status; 2930 2931 return ice_discover_func_caps(hw, &hw->func_caps); 2932 } 2933 2934 /** 2935 * ice_aq_manage_mac_write - manage MAC address write command 2936 * @hw: pointer to the HW struct 2937 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2938 * @flags: flags to control write behavior 2939 * @cd: pointer to command details structure or NULL 2940 * 2941 * This function is used to write MAC address 
to the NVM (0x0108). 2942 */ 2943 int 2944 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2945 struct ice_sq_cd *cd) 2946 { 2947 struct ice_aqc_manage_mac_write *cmd; 2948 struct ice_aq_desc desc; 2949 2950 cmd = &desc.params.mac_write; 2951 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2952 2953 cmd->flags = flags; 2954 ether_addr_copy(cmd->mac_addr, mac_addr); 2955 2956 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2957 } 2958 2959 /** 2960 * ice_aq_clear_pxe_mode 2961 * @hw: pointer to the HW struct 2962 * 2963 * Tell the firmware that the driver is taking over from PXE (0x0110). 2964 */ 2965 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2966 { 2967 struct ice_aq_desc desc; 2968 2969 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2970 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2971 2972 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2973 } 2974 2975 /** 2976 * ice_clear_pxe_mode - clear pxe operations mode 2977 * @hw: pointer to the HW struct 2978 * 2979 * Make sure all PXE mode settings are cleared, including things 2980 * like descriptor fetch/write-back mode. 2981 */ 2982 void ice_clear_pxe_mode(struct ice_hw *hw) 2983 { 2984 if (ice_check_sq_alive(hw, &hw->adminq)) 2985 ice_aq_clear_pxe_mode(hw); 2986 } 2987 2988 /** 2989 * ice_aq_set_port_params - set physical port parameters. 2990 * @pi: pointer to the port info struct 2991 * @double_vlan: if set double VLAN is enabled 2992 * @cd: pointer to command details structure or NULL 2993 * 2994 * Set Physical port parameters (0x0203) 2995 */ 2996 int 2997 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2998 struct ice_sq_cd *cd) 2999 3000 { 3001 struct ice_aqc_set_port_params *cmd; 3002 struct ice_hw *hw = pi->hw; 3003 struct ice_aq_desc desc; 3004 u16 cmd_flags = 0; 3005 3006 cmd = &desc.params.set_port_params; 3007 3008 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3009 if (double_vlan) 3010 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3011 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3012 3013 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3014 } 3015 3016 /** 3017 * ice_is_100m_speed_supported 3018 * @hw: pointer to the HW struct 3019 * 3020 * returns true if 100M speeds are supported by the device, 3021 * false otherwise. 3022 */ 3023 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3024 { 3025 switch (hw->device_id) { 3026 case ICE_DEV_ID_E822C_SGMII: 3027 case ICE_DEV_ID_E822L_SGMII: 3028 case ICE_DEV_ID_E823L_1GBE: 3029 case ICE_DEV_ID_E823C_SGMII: 3030 return true; 3031 default: 3032 return false; 3033 } 3034 } 3035 3036 /** 3037 * ice_get_link_speed_based_on_phy_type - returns link speed 3038 * @phy_type_low: lower part of phy_type 3039 * @phy_type_high: higher part of phy_type 3040 * 3041 * This helper function will convert an entry in PHY type structure 3042 * [phy_type_low, phy_type_high] to its corresponding link speed. 3043 * Note: In the structure of [phy_type_low, phy_type_high], there should 3044 * be one bit set, as this function will convert one PHY type to its 3045 * speed. 
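 * For example, passing ICE_PHY_TYPE_LOW_1000BASE_T as phy_type_low with
 * phy_type_high == 0 returns ICE_AQ_LINK_SPEED_1000MB.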
3046 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3047 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3048 */ 3049 static u16 3050 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3051 { 3052 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3053 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3054 3055 switch (phy_type_low) { 3056 case ICE_PHY_TYPE_LOW_100BASE_TX: 3057 case ICE_PHY_TYPE_LOW_100M_SGMII: 3058 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3059 break; 3060 case ICE_PHY_TYPE_LOW_1000BASE_T: 3061 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3062 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3063 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3064 case ICE_PHY_TYPE_LOW_1G_SGMII: 3065 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3066 break; 3067 case ICE_PHY_TYPE_LOW_2500BASE_T: 3068 case ICE_PHY_TYPE_LOW_2500BASE_X: 3069 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3070 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3071 break; 3072 case ICE_PHY_TYPE_LOW_5GBASE_T: 3073 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3074 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3075 break; 3076 case ICE_PHY_TYPE_LOW_10GBASE_T: 3077 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3078 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3079 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3080 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3081 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3082 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3083 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3084 break; 3085 case ICE_PHY_TYPE_LOW_25GBASE_T: 3086 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3087 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3088 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3089 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3090 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3091 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3092 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3093 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3094 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3095 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3096 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3097 break; 3098 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3099 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3100 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3101 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3102 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3103 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3104 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3105 break; 3106 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3107 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3108 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3109 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3110 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3111 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3112 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3113 case ICE_PHY_TYPE_LOW_50G_AUI2: 3114 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3115 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3116 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3117 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3118 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3119 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3120 case ICE_PHY_TYPE_LOW_50G_AUI1: 3121 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3122 break; 3123 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3124 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3125 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3126 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3127 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3128 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3129 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3130 case ICE_PHY_TYPE_LOW_100G_AUI4: 3131 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3132 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3133 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3134 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3135 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3136 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3137 
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_SR4:
	case ICE_PHY_TYPE_HIGH_200G_FR4:
	case ICE_PHY_TYPE_HIGH_200G_LR4:
	case ICE_PHY_TYPE_HIGH_200G_DR4:
	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_200G_AUI4:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the format of link_speeds_bitmap, see
 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function turns on the bits in
 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
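 *
 * The usual calling pattern in this file (see ice_set_fc() below) is to
 * read a configuration, copy it, adjust it and write it back, roughly:
 *
 *	ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL);
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	... adjust cfg.caps and cfg.link_fec_opt as needed ...
 *	ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);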
3233 */ 3234 int 3235 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3236 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3237 { 3238 struct ice_aq_desc desc; 3239 int status; 3240 3241 if (!cfg) 3242 return -EINVAL; 3243 3244 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3245 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3246 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3247 cfg->caps); 3248 3249 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3250 } 3251 3252 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3253 desc.params.set_phy.lport_num = pi->lport; 3254 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3255 3256 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3257 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3258 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3259 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3260 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3261 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3262 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3263 cfg->low_power_ctrl_an); 3264 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3265 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3266 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3267 cfg->link_fec_opt); 3268 3269 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3270 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3271 status = 0; 3272 3273 if (!status) 3274 pi->phy.curr_user_phy_cfg = *cfg; 3275 3276 return status; 3277 } 3278 3279 /** 3280 * ice_update_link_info - update status of the HW network link 3281 * @pi: port info structure of the interested logical port 3282 */ 3283 int ice_update_link_info(struct ice_port_info *pi) 3284 { 3285 struct ice_link_status *li; 3286 int status; 3287 3288 if (!pi) 3289 return -EINVAL; 3290 3291 li = &pi->phy.link_info; 3292 3293 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3294 if (status) 3295 return status; 3296 3297 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3298 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3299 3300 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3301 if (!pcaps) 3302 return -ENOMEM; 3303 3304 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3305 pcaps, NULL); 3306 } 3307 3308 return status; 3309 } 3310 3311 /** 3312 * ice_cache_phy_user_req 3313 * @pi: port information structure 3314 * @cache_data: PHY logging data 3315 * @cache_mode: PHY logging mode 3316 * 3317 * Log the user request on (FC, FEC, SPEED) for later use. 
3318 */ 3319 static void 3320 ice_cache_phy_user_req(struct ice_port_info *pi, 3321 struct ice_phy_cache_mode_data cache_data, 3322 enum ice_phy_cache_mode cache_mode) 3323 { 3324 if (!pi) 3325 return; 3326 3327 switch (cache_mode) { 3328 case ICE_FC_MODE: 3329 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3330 break; 3331 case ICE_SPEED_MODE: 3332 pi->phy.curr_user_speed_req = 3333 cache_data.data.curr_user_speed_req; 3334 break; 3335 case ICE_FEC_MODE: 3336 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3337 break; 3338 default: 3339 break; 3340 } 3341 } 3342 3343 /** 3344 * ice_caps_to_fc_mode 3345 * @caps: PHY capabilities 3346 * 3347 * Convert PHY FC capabilities to ice FC mode 3348 */ 3349 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3350 { 3351 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3352 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3353 return ICE_FC_FULL; 3354 3355 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3356 return ICE_FC_TX_PAUSE; 3357 3358 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3359 return ICE_FC_RX_PAUSE; 3360 3361 return ICE_FC_NONE; 3362 } 3363 3364 /** 3365 * ice_caps_to_fec_mode 3366 * @caps: PHY capabilities 3367 * @fec_options: Link FEC options 3368 * 3369 * Convert PHY FEC capabilities to ice FEC mode 3370 */ 3371 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3372 { 3373 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3374 return ICE_FEC_AUTO; 3375 3376 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3377 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3378 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3379 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3380 return ICE_FEC_BASER; 3381 3382 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3383 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3384 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3385 return ICE_FEC_RS; 3386 3387 return ICE_FEC_NONE; 3388 } 3389 3390 /** 3391 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3392 * @pi: port information structure 3393 * @cfg: PHY configuration data to set FC mode 3394 * @req_mode: FC mode to configure 3395 */ 3396 int 3397 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3398 enum ice_fc_mode req_mode) 3399 { 3400 struct ice_phy_cache_mode_data cache_data; 3401 u8 pause_mask = 0x0; 3402 3403 if (!pi || !cfg) 3404 return -EINVAL; 3405 3406 switch (req_mode) { 3407 case ICE_FC_FULL: 3408 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3409 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3410 break; 3411 case ICE_FC_RX_PAUSE: 3412 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3413 break; 3414 case ICE_FC_TX_PAUSE: 3415 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3416 break; 3417 default: 3418 break; 3419 } 3420 3421 /* clear the old pause settings */ 3422 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3423 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3424 3425 /* set the new capabilities */ 3426 cfg->caps |= pause_mask; 3427 3428 /* Cache user FC request */ 3429 cache_data.data.curr_user_fc_req = req_mode; 3430 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3431 3432 return 0; 3433 } 3434 3435 /** 3436 * ice_set_fc 3437 * @pi: port information structure 3438 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3439 * @ena_auto_link_update: enable automatic link update 3440 * 3441 * Set the requested flow control mode. 
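 *
 * A minimal caller sketch (hypothetical; request symmetric pause and let
 * firmware restart the link so the change takes effect):
 *
 *	u8 aq_fail;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_fail, true);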
3442 */ 3443 int 3444 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3445 { 3446 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3447 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3448 struct ice_hw *hw; 3449 int status; 3450 3451 if (!pi || !aq_failures) 3452 return -EINVAL; 3453 3454 *aq_failures = 0; 3455 hw = pi->hw; 3456 3457 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3458 if (!pcaps) 3459 return -ENOMEM; 3460 3461 /* Get the current PHY config */ 3462 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3463 pcaps, NULL); 3464 if (status) { 3465 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3466 goto out; 3467 } 3468 3469 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3470 3471 /* Configure the set PHY data */ 3472 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3473 if (status) 3474 goto out; 3475 3476 /* If the capabilities have changed, then set the new config */ 3477 if (cfg.caps != pcaps->caps) { 3478 int retry_count, retry_max = 10; 3479 3480 /* Auto restart link so settings take effect */ 3481 if (ena_auto_link_update) 3482 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3483 3484 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3485 if (status) { 3486 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3487 goto out; 3488 } 3489 3490 /* Update the link info 3491 * It sometimes takes a really long time for link to 3492 * come back from the atomic reset. Thus, we wait a 3493 * little bit. 3494 */ 3495 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3496 status = ice_update_link_info(pi); 3497 3498 if (!status) 3499 break; 3500 3501 mdelay(100); 3502 } 3503 3504 if (status) 3505 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3506 } 3507 3508 out: 3509 return status; 3510 } 3511 3512 /** 3513 * ice_phy_caps_equals_cfg 3514 * @phy_caps: PHY capabilities 3515 * @phy_cfg: PHY configuration 3516 * 3517 * Helper function to determine if PHY capabilities matches PHY 3518 * configuration 3519 */ 3520 bool 3521 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3522 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3523 { 3524 u8 caps_mask, cfg_mask; 3525 3526 if (!phy_caps || !phy_cfg) 3527 return false; 3528 3529 /* These bits are not common between capabilities and configuration. 3530 * Do not use them to determine equality. 
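 * (ICE_AQC_PHY_AN_MODE and ICE_AQC_GET_PHY_EN_MOD_QUAL on the capabilities
 * side, ICE_AQ_PHY_ENA_AUTO_LINK_UPDT on the configuration side.)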
3531 */ 3532 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3533 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3534 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3535 3536 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3537 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3538 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3539 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3540 phy_caps->eee_cap != phy_cfg->eee_cap || 3541 phy_caps->eeer_value != phy_cfg->eeer_value || 3542 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3543 return false; 3544 3545 return true; 3546 } 3547 3548 /** 3549 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3550 * @pi: port information structure 3551 * @caps: PHY ability structure to copy data from 3552 * @cfg: PHY configuration structure to copy data to 3553 * 3554 * Helper function to copy AQC PHY get ability data to PHY set configuration 3555 * data structure 3556 */ 3557 void 3558 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3559 struct ice_aqc_get_phy_caps_data *caps, 3560 struct ice_aqc_set_phy_cfg_data *cfg) 3561 { 3562 if (!pi || !caps || !cfg) 3563 return; 3564 3565 memset(cfg, 0, sizeof(*cfg)); 3566 cfg->phy_type_low = caps->phy_type_low; 3567 cfg->phy_type_high = caps->phy_type_high; 3568 cfg->caps = caps->caps; 3569 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3570 cfg->eee_cap = caps->eee_cap; 3571 cfg->eeer_value = caps->eeer_value; 3572 cfg->link_fec_opt = caps->link_fec_options; 3573 cfg->module_compliance_enforcement = 3574 caps->module_compliance_enforcement; 3575 } 3576 3577 /** 3578 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3579 * @pi: port information structure 3580 * @cfg: PHY configuration data to set FEC mode 3581 * @fec: FEC mode to configure 3582 */ 3583 int 3584 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3585 enum ice_fec_mode fec) 3586 { 3587 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL; 3588 struct ice_hw *hw; 3589 int status; 3590 3591 if (!pi || !cfg) 3592 return -EINVAL; 3593 3594 hw = pi->hw; 3595 3596 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3597 if (!pcaps) 3598 return -ENOMEM; 3599 3600 status = ice_aq_get_phy_caps(pi, false, 3601 (ice_fw_supports_report_dflt_cfg(hw) ? 3602 ICE_AQC_REPORT_DFLT_CFG : 3603 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3604 if (status) 3605 goto out; 3606 3607 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3608 cfg->link_fec_opt = pcaps->link_fec_options; 3609 3610 switch (fec) { 3611 case ICE_FEC_BASER: 3612 /* Clear RS bits, AND in the BASE-R ability 3613 * bits, and OR in the request bits. 3614 */ 3615 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3616 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3617 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3618 ICE_AQC_PHY_FEC_25G_KR_REQ; 3619 break; 3620 case ICE_FEC_RS: 3621 /* Clear BASE-R bits, AND in the RS ability 3622 * bits, and OR in the request bits. 3623 */ 3624 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3625 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3626 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3627 break; 3628 case ICE_FEC_NONE: 3629 /* Clear all FEC option bits. */ 3630 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3631 break; 3632 case ICE_FEC_AUTO: 3633 /* AND auto FEC bit, and all caps bits.
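 * That is, for ICE_FEC_AUTO keep only valid capability bits in
 * cfg->caps and advertise every FEC option the PHY reports, leaving
 * the final FEC choice to firmware.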
*/ 3634 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3635 cfg->link_fec_opt |= pcaps->link_fec_options; 3636 break; 3637 default: 3638 status = -EINVAL; 3639 break; 3640 } 3641 3642 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3643 !ice_fw_supports_report_dflt_cfg(hw)) { 3644 struct ice_link_default_override_tlv tlv = { 0 }; 3645 3646 status = ice_get_link_default_override(&tlv, pi); 3647 if (status) 3648 goto out; 3649 3650 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3651 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3652 cfg->link_fec_opt = tlv.fec_options; 3653 } 3654 3655 out: 3656 return status; 3657 } 3658 3659 /** 3660 * ice_get_link_status - get status of the HW network link 3661 * @pi: port information structure 3662 * @link_up: pointer to bool (true/false = linkup/linkdown) 3663 * 3664 * Variable link_up is true if link is up, false if link is down. 3665 * The variable link_up is invalid if status is non zero. As a 3666 * result of this call, link status reporting becomes enabled 3667 */ 3668 int ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3669 { 3670 struct ice_phy_info *phy_info; 3671 int status = 0; 3672 3673 if (!pi || !link_up) 3674 return -EINVAL; 3675 3676 phy_info = &pi->phy; 3677 3678 if (phy_info->get_link_info) { 3679 status = ice_update_link_info(pi); 3680 3681 if (status) 3682 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3683 status); 3684 } 3685 3686 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3687 3688 return status; 3689 } 3690 3691 /** 3692 * ice_aq_set_link_restart_an 3693 * @pi: pointer to the port information structure 3694 * @ena_link: if true: enable link, if false: disable link 3695 * @cd: pointer to command details structure or NULL 3696 * 3697 * Sets up the link and restarts the Auto-Negotiation over the link. 
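 *
 * Illustrative sketch (caller context assumed): to bounce the link after
 * a PHY configuration change, a caller might issue:
 *
 *	int err = ice_aq_set_link_restart_an(pi, true, NULL);
 *
 *	if (err)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "restart AN failed %d\n", err);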
3698 */ 3699 int 3700 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3701 struct ice_sq_cd *cd) 3702 { 3703 struct ice_aqc_restart_an *cmd; 3704 struct ice_aq_desc desc; 3705 3706 cmd = &desc.params.restart_an; 3707 3708 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3709 3710 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3711 cmd->lport_num = pi->lport; 3712 if (ena_link) 3713 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3714 else 3715 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3716 3717 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3718 } 3719 3720 /** 3721 * ice_aq_set_event_mask 3722 * @hw: pointer to the HW struct 3723 * @port_num: port number of the physical function 3724 * @mask: event mask to be set 3725 * @cd: pointer to command details structure or NULL 3726 * 3727 * Set event mask (0x0613) 3728 */ 3729 int 3730 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3731 struct ice_sq_cd *cd) 3732 { 3733 struct ice_aqc_set_event_mask *cmd; 3734 struct ice_aq_desc desc; 3735 3736 cmd = &desc.params.set_event_mask; 3737 3738 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3739 3740 cmd->lport_num = port_num; 3741 3742 cmd->event_mask = cpu_to_le16(mask); 3743 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3744 } 3745 3746 /** 3747 * ice_aq_set_mac_loopback 3748 * @hw: pointer to the HW struct 3749 * @ena_lpbk: Enable or Disable loopback 3750 * @cd: pointer to command details structure or NULL 3751 * 3752 * Enable/disable loopback on a given port 3753 */ 3754 int 3755 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3756 { 3757 struct ice_aqc_set_mac_lb *cmd; 3758 struct ice_aq_desc desc; 3759 3760 cmd = &desc.params.set_mac_lb; 3761 3762 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3763 if (ena_lpbk) 3764 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3765 3766 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3767 } 3768 3769 /** 3770 * ice_aq_set_port_id_led 3771 * @pi: pointer to the port information 3772 * @is_orig_mode: is this LED set to original mode (by the net-list) 3773 * @cd: pointer to command details structure or NULL 3774 * 3775 * Set LED value for the given port (0x06e9) 3776 */ 3777 int 3778 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3779 struct ice_sq_cd *cd) 3780 { 3781 struct ice_aqc_set_port_id_led *cmd; 3782 struct ice_hw *hw = pi->hw; 3783 struct ice_aq_desc desc; 3784 3785 cmd = &desc.params.set_port_id_led; 3786 3787 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3788 3789 if (is_orig_mode) 3790 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3791 else 3792 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3793 3794 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3795 } 3796 3797 /** 3798 * ice_aq_get_port_options 3799 * @hw: pointer to the HW struct 3800 * @options: buffer for the resultant port options 3801 * @option_count: input - size of the buffer in port options structures, 3802 * output - number of returned port options 3803 * @lport: logical port to call the command with (optional) 3804 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3805 * when PF owns more than 1 port it must be true 3806 * @active_option_idx: index of active port option in returned buffer 3807 * @active_option_valid: active option in returned buffer is valid 3808 * @pending_option_idx: index of pending port option in returned buffer 3809 * @pending_option_valid: pending option in returned buffer 
is valid 3810 * 3811 * Calls Get Port Options AQC (0x06ea) and verifies result. 3812 */ 3813 int 3814 ice_aq_get_port_options(struct ice_hw *hw, 3815 struct ice_aqc_get_port_options_elem *options, 3816 u8 *option_count, u8 lport, bool lport_valid, 3817 u8 *active_option_idx, bool *active_option_valid, 3818 u8 *pending_option_idx, bool *pending_option_valid) 3819 { 3820 struct ice_aqc_get_port_options *cmd; 3821 struct ice_aq_desc desc; 3822 int status; 3823 u8 i; 3824 3825 /* options buffer shall be able to hold max returned options */ 3826 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3827 return -EINVAL; 3828 3829 cmd = &desc.params.get_port_options; 3830 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3831 3832 if (lport_valid) 3833 cmd->lport_num = lport; 3834 cmd->lport_num_valid = lport_valid; 3835 3836 status = ice_aq_send_cmd(hw, &desc, options, 3837 *option_count * sizeof(*options), NULL); 3838 if (status) 3839 return status; 3840 3841 /* verify direct FW response & set output parameters */ 3842 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 3843 cmd->port_options_count); 3844 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 3845 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 3846 cmd->port_options); 3847 if (*active_option_valid) { 3848 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 3849 cmd->port_options); 3850 if (*active_option_idx > (*option_count - 1)) 3851 return -EIO; 3852 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 3853 *active_option_idx); 3854 } 3855 3856 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 3857 cmd->pending_port_option_status); 3858 if (*pending_option_valid) { 3859 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 3860 cmd->pending_port_option_status); 3861 if (*pending_option_idx > (*option_count - 1)) 3862 return -EIO; 3863 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 3864 *pending_option_idx); 3865 } 3866 3867 /* mask output options fields */ 3868 for (i = 0; i < *option_count; i++) { 3869 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 3870 options[i].pmd); 3871 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 3872 options[i].max_lane_speed); 3873 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 3874 options[i].pmd, options[i].max_lane_speed); 3875 } 3876 3877 return 0; 3878 } 3879 3880 /** 3881 * ice_aq_set_port_option 3882 * @hw: pointer to the HW struct 3883 * @lport: logical port to call the command with 3884 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3885 * when PF owns more than 1 port it must be true 3886 * @new_option: new port option to be written 3887 * 3888 * Calls Set Port Options AQC (0x06eb). 
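 *
 * Illustrative sketch (the buffer size of 16 and the chosen index are
 * assumptions, not driver requirements): a new option is normally picked
 * from the set returned by ice_aq_get_port_options(), e.g.:
 *
 *	struct ice_aqc_get_port_options_elem opts[16];
 *	u8 count = 16, active, pending;
 *	bool active_valid, pending_valid;
 *	int err;
 *
 *	err = ice_aq_get_port_options(hw, opts, &count, 0, false, &active,
 *				      &active_valid, &pending, &pending_valid);
 *	if (!err && count)
 *		err = ice_aq_set_port_option(hw, 0, false, count - 1);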
3889 */ 3890 int 3891 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 3892 u8 new_option) 3893 { 3894 struct ice_aqc_set_port_option *cmd; 3895 struct ice_aq_desc desc; 3896 3897 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 3898 return -EINVAL; 3899 3900 cmd = &desc.params.set_port_option; 3901 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 3902 3903 if (lport_valid) 3904 cmd->lport_num = lport; 3905 3906 cmd->lport_num_valid = lport_valid; 3907 cmd->selected_port_option = new_option; 3908 3909 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3910 } 3911 3912 /** 3913 * ice_aq_sff_eeprom 3914 * @hw: pointer to the HW struct 3915 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3916 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3917 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3918 * @page: QSFP page 3919 * @set_page: set or ignore the page 3920 * @data: pointer to data buffer to be read/written to the I2C device. 3921 * @length: 1-16 for read, 1 for write. 3922 * @write: 0 read, 1 for write. 3923 * @cd: pointer to command details structure or NULL 3924 * 3925 * Read/Write SFF EEPROM (0x06EE) 3926 */ 3927 int 3928 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3929 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3930 bool write, struct ice_sq_cd *cd) 3931 { 3932 struct ice_aqc_sff_eeprom *cmd; 3933 struct ice_aq_desc desc; 3934 u16 i2c_bus_addr; 3935 int status; 3936 3937 if (!data || (mem_addr & 0xff00)) 3938 return -EINVAL; 3939 3940 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3941 cmd = &desc.params.read_write_sff_param; 3942 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 3943 cmd->lport_num = (u8)(lport & 0xff); 3944 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3945 i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) | 3946 FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page); 3947 if (write) 3948 i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE; 3949 cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr); 3950 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3951 cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M); 3952 3953 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3954 return status; 3955 } 3956 3957 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) 3958 { 3959 switch (type) { 3960 case ICE_LUT_VSI: 3961 return ICE_LUT_VSI_SIZE; 3962 case ICE_LUT_GLOBAL: 3963 return ICE_LUT_GLOBAL_SIZE; 3964 case ICE_LUT_PF: 3965 return ICE_LUT_PF_SIZE; 3966 } 3967 WARN_ONCE(1, "incorrect type passed"); 3968 return ICE_LUT_VSI_SIZE; 3969 } 3970 3971 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) 3972 { 3973 switch (size) { 3974 case ICE_LUT_VSI_SIZE: 3975 return ICE_AQC_LUT_SIZE_SMALL; 3976 case ICE_LUT_GLOBAL_SIZE: 3977 return ICE_AQC_LUT_SIZE_512; 3978 case ICE_LUT_PF_SIZE: 3979 return ICE_AQC_LUT_SIZE_2K; 3980 } 3981 WARN_ONCE(1, "incorrect size passed"); 3982 return 0; 3983 } 3984 3985 /** 3986 * __ice_aq_get_set_rss_lut 3987 * @hw: pointer to the hardware structure 3988 * @params: RSS LUT parameters 3989 * @set: set true to set the table, false to get the table 3990 * 3991 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3992 */ 3993 static int 3994 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 3995 struct ice_aq_get_set_rss_lut_params *params, bool set) 3996 { 3997 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 3998 enum 
ice_lut_type lut_type = params->lut_type; 3999 struct ice_aqc_get_set_rss_lut *desc_params; 4000 enum ice_aqc_lut_flags flags; 4001 enum ice_lut_size lut_size; 4002 struct ice_aq_desc desc; 4003 u8 *lut = params->lut; 4004 4005 4006 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4007 return -EINVAL; 4008 4009 lut_size = ice_lut_type_to_size(lut_type); 4010 if (lut_size > params->lut_size) 4011 return -EINVAL; 4012 else if (set && lut_size != params->lut_size) 4013 return -EINVAL; 4014 4015 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4016 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4017 if (set) 4018 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4019 4020 desc_params = &desc.params.get_set_rss_lut; 4021 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4022 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4023 4024 if (lut_type == ICE_LUT_GLOBAL) 4025 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4026 params->global_lut_id); 4027 4028 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4029 desc_params->flags = cpu_to_le16(flags); 4030 4031 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4032 } 4033 4034 /** 4035 * ice_aq_get_rss_lut 4036 * @hw: pointer to the hardware structure 4037 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4038 * 4039 * get the RSS lookup table, PF or VSI type 4040 */ 4041 int 4042 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4043 { 4044 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4045 } 4046 4047 /** 4048 * ice_aq_set_rss_lut 4049 * @hw: pointer to the hardware structure 4050 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4051 * 4052 * set the RSS lookup table, PF or VSI type 4053 */ 4054 int 4055 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4056 { 4057 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4058 } 4059 4060 /** 4061 * __ice_aq_get_set_rss_key 4062 * @hw: pointer to the HW struct 4063 * @vsi_id: VSI FW index 4064 * @key: pointer to key info struct 4065 * @set: set true to set the key, false to get the key 4066 * 4067 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4068 */ 4069 static int 4070 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4071 struct ice_aqc_get_set_rss_keys *key, bool set) 4072 { 4073 struct ice_aqc_get_set_rss_key *desc_params; 4074 u16 key_size = sizeof(*key); 4075 struct ice_aq_desc desc; 4076 4077 if (set) { 4078 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4079 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4080 } else { 4081 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4082 } 4083 4084 desc_params = &desc.params.get_set_rss_key; 4085 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4086 4087 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4088 } 4089 4090 /** 4091 * ice_aq_get_rss_key 4092 * @hw: pointer to the HW struct 4093 * @vsi_handle: software VSI handle 4094 * @key: pointer to key info struct 4095 * 4096 * get the RSS key per VSI 4097 */ 4098 int 4099 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4100 struct ice_aqc_get_set_rss_keys *key) 4101 { 4102 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4103 return -EINVAL; 4104 4105 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4106 key, false); 4107 } 4108 4109 /** 4110 * ice_aq_set_rss_key 4111 * @hw: pointer to the HW struct 4112 * @vsi_handle: software VSI handle 
4113 * @keys: pointer to key info struct 4114 * 4115 * set the RSS key per VSI 4116 */ 4117 int 4118 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4119 struct ice_aqc_get_set_rss_keys *keys) 4120 { 4121 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4122 return -EINVAL; 4123 4124 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4125 keys, true); 4126 } 4127 4128 /** 4129 * ice_aq_add_lan_txq 4130 * @hw: pointer to the hardware structure 4131 * @num_qgrps: Number of added queue groups 4132 * @qg_list: list of queue groups to be added 4133 * @buf_size: size of buffer for indirect command 4134 * @cd: pointer to command details structure or NULL 4135 * 4136 * Add Tx LAN queue (0x0C30) 4137 * 4138 * NOTE: 4139 * Prior to calling add Tx LAN queue: 4140 * Initialize the following as part of the Tx queue context: 4141 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4142 * Cache profile and Packet shaper profile. 4143 * 4144 * After add Tx LAN queue AQ command is completed: 4145 * Interrupts should be associated with specific queues, 4146 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4147 * flow. 4148 */ 4149 static int 4150 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4151 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4152 struct ice_sq_cd *cd) 4153 { 4154 struct ice_aqc_add_tx_qgrp *list; 4155 struct ice_aqc_add_txqs *cmd; 4156 struct ice_aq_desc desc; 4157 u16 i, sum_size = 0; 4158 4159 cmd = &desc.params.add_txqs; 4160 4161 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4162 4163 if (!qg_list) 4164 return -EINVAL; 4165 4166 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4167 return -EINVAL; 4168 4169 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4170 sum_size += struct_size(list, txqs, list->num_txqs); 4171 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4172 list->num_txqs); 4173 } 4174 4175 if (buf_size != sum_size) 4176 return -EINVAL; 4177 4178 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4179 4180 cmd->num_qgrps = num_qgrps; 4181 4182 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4183 } 4184 4185 /** 4186 * ice_aq_dis_lan_txq 4187 * @hw: pointer to the hardware structure 4188 * @num_qgrps: number of groups in the list 4189 * @qg_list: the list of groups to disable 4190 * @buf_size: the total size of the qg_list buffer in bytes 4191 * @rst_src: if called due to reset, specifies the reset source 4192 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4193 * @cd: pointer to command details structure or NULL 4194 * 4195 * Disable LAN Tx queue (0x0C31) 4196 */ 4197 static int 4198 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4199 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4200 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4201 struct ice_sq_cd *cd) 4202 { 4203 struct ice_aqc_dis_txq_item *item; 4204 struct ice_aqc_dis_txqs *cmd; 4205 struct ice_aq_desc desc; 4206 u16 vmvf_and_timeout; 4207 u16 i, sz = 0; 4208 int status; 4209 4210 cmd = &desc.params.dis_txqs; 4211 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4212 4213 /* qg_list can be NULL only in VM/VF reset flow */ 4214 if (!qg_list && !rst_src) 4215 return -EINVAL; 4216 4217 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4218 return -EINVAL; 4219 4220 cmd->num_entries = num_qgrps; 4221 4222 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); 4223 4224 switch (rst_src) { 4225 case ICE_VM_RESET: 4226 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4227 vmvf_and_timeout |= 
vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; 4228 break; 4229 case ICE_VF_RESET: 4230 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4231 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4232 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & 4233 ICE_AQC_Q_DIS_VMVF_NUM_M; 4234 break; 4235 case ICE_NO_RESET: 4236 default: 4237 break; 4238 } 4239 4240 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); 4241 4242 /* flush pipe on time out */ 4243 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4244 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4245 if (!qg_list) 4246 goto do_aq; 4247 4248 /* set RD bit to indicate that command buffer is provided by the driver 4249 * and it needs to be read by the firmware 4250 */ 4251 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4252 4253 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4254 u16 item_size = struct_size(item, q_id, item->num_qs); 4255 4256 /* If the num of queues is even, add 2 bytes of padding */ 4257 if ((item->num_qs % 2) == 0) 4258 item_size += 2; 4259 4260 sz += item_size; 4261 4262 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4263 } 4264 4265 if (buf_size != sz) 4266 return -EINVAL; 4267 4268 do_aq: 4269 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4270 if (status) { 4271 if (!qg_list) 4272 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4273 vmvf_num, hw->adminq.sq_last_status); 4274 else 4275 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4276 le16_to_cpu(qg_list[0].q_id[0]), 4277 hw->adminq.sq_last_status); 4278 } 4279 return status; 4280 } 4281 4282 /** 4283 * ice_aq_cfg_lan_txq 4284 * @hw: pointer to the hardware structure 4285 * @buf: buffer for command 4286 * @buf_size: size of buffer in bytes 4287 * @num_qs: number of queues being configured 4288 * @oldport: origination lport 4289 * @newport: destination lport 4290 * @cd: pointer to command details structure or NULL 4291 * 4292 * Move/Configure LAN Tx queue (0x0C32) 4293 * 4294 * There is a better AQ command to use for moving nodes, so only coding 4295 * this one for configuring the node. 
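 *
 * Note: cmd_type is hard-coded to ICE_AQC_Q_CFG_TC_CHNG below, so this
 * wrapper only reconfigures existing queues (e.g. onto another TC);
 * moving scheduler nodes between ports is left to the dedicated AQ
 * command mentioned above.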
4296 */ 4297 int 4298 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4299 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4300 struct ice_sq_cd *cd) 4301 { 4302 struct ice_aqc_cfg_txqs *cmd; 4303 struct ice_aq_desc desc; 4304 int status; 4305 4306 cmd = &desc.params.cfg_txqs; 4307 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4308 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4309 4310 if (!buf) 4311 return -EINVAL; 4312 4313 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4314 cmd->num_qs = num_qs; 4315 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4316 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); 4317 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); 4318 cmd->blocked_cgds = 0; 4319 4320 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4321 if (status) 4322 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4323 hw->adminq.sq_last_status); 4324 return status; 4325 } 4326 4327 /** 4328 * ice_aq_add_rdma_qsets 4329 * @hw: pointer to the hardware structure 4330 * @num_qset_grps: Number of RDMA Qset groups 4331 * @qset_list: list of Qset groups to be added 4332 * @buf_size: size of buffer for indirect command 4333 * @cd: pointer to command details structure or NULL 4334 * 4335 * Add Tx RDMA Qsets (0x0C33) 4336 */ 4337 static int 4338 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4339 struct ice_aqc_add_rdma_qset_data *qset_list, 4340 u16 buf_size, struct ice_sq_cd *cd) 4341 { 4342 struct ice_aqc_add_rdma_qset_data *list; 4343 struct ice_aqc_add_rdma_qset *cmd; 4344 struct ice_aq_desc desc; 4345 u16 i, sum_size = 0; 4346 4347 cmd = &desc.params.add_rdma_qset; 4348 4349 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4350 4351 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4352 return -EINVAL; 4353 4354 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4355 u16 num_qsets = le16_to_cpu(list->num_qsets); 4356 4357 sum_size += struct_size(list, rdma_qsets, num_qsets); 4358 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4359 num_qsets); 4360 } 4361 4362 if (buf_size != sum_size) 4363 return -EINVAL; 4364 4365 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4366 4367 cmd->num_qset_grps = num_qset_grps; 4368 4369 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4370 } 4371 4372 /* End of FW Admin Queue command wrappers */ 4373 4374 /** 4375 * ice_pack_ctx_byte - write a byte to a packed context structure 4376 * @src_ctx: unpacked source context structure 4377 * @dest_ctx: packed destination context data 4378 * @ce_info: context element description 4379 */ 4380 static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, 4381 const struct ice_ctx_ele *ce_info) 4382 { 4383 u8 src_byte, dest_byte, mask; 4384 u8 *from, *dest; 4385 u16 shift_width; 4386 4387 /* copy from the next struct field */ 4388 from = src_ctx + ce_info->offset; 4389 4390 /* prepare the bits and mask */ 4391 shift_width = ce_info->lsb % 8; 4392 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4393 4394 src_byte = *from; 4395 src_byte <<= shift_width; 4396 src_byte &= mask; 4397 4398 /* get the current bits from the target bit string */ 4399 dest = dest_ctx + (ce_info->lsb / 8); 4400 4401 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4402 4403 dest_byte &= ~mask; /* get the bits not changing */ 4404 dest_byte |= src_byte; /* add in the new bits */ 4405 4406 /* put it all back */ 4407 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4408 } 4409 4410 /** 4411 * ice_pack_ctx_word - write 
a word to a packed context structure 4412 * @src_ctx: unpacked source context structure 4413 * @dest_ctx: packed destination context data 4414 * @ce_info: context element description 4415 */ 4416 static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, 4417 const struct ice_ctx_ele *ce_info) 4418 { 4419 u16 src_word, mask; 4420 __le16 dest_word; 4421 u8 *from, *dest; 4422 u16 shift_width; 4423 4424 /* copy from the next struct field */ 4425 from = src_ctx + ce_info->offset; 4426 4427 /* prepare the bits and mask */ 4428 shift_width = ce_info->lsb % 8; 4429 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4430 4431 /* don't swizzle the bits until after the mask because the mask bits 4432 * will be in a different bit position on big endian machines 4433 */ 4434 src_word = *(u16 *)from; 4435 src_word <<= shift_width; 4436 src_word &= mask; 4437 4438 /* get the current bits from the target bit string */ 4439 dest = dest_ctx + (ce_info->lsb / 8); 4440 4441 memcpy(&dest_word, dest, sizeof(dest_word)); 4442 4443 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4444 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4445 4446 /* put it all back */ 4447 memcpy(dest, &dest_word, sizeof(dest_word)); 4448 } 4449 4450 /** 4451 * ice_pack_ctx_dword - write a dword to a packed context structure 4452 * @src_ctx: unpacked source context structure 4453 * @dest_ctx: packed destination context data 4454 * @ce_info: context element description 4455 */ 4456 static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, 4457 const struct ice_ctx_ele *ce_info) 4458 { 4459 u32 src_dword, mask; 4460 __le32 dest_dword; 4461 u8 *from, *dest; 4462 u16 shift_width; 4463 4464 /* copy from the next struct field */ 4465 from = src_ctx + ce_info->offset; 4466 4467 /* prepare the bits and mask */ 4468 shift_width = ce_info->lsb % 8; 4469 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4470 4471 /* don't swizzle the bits until after the mask because the mask bits 4472 * will be in a different bit position on big endian machines 4473 */ 4474 src_dword = *(u32 *)from; 4475 src_dword <<= shift_width; 4476 src_dword &= mask; 4477 4478 /* get the current bits from the target bit string */ 4479 dest = dest_ctx + (ce_info->lsb / 8); 4480 4481 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4482 4483 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4484 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4485 4486 /* put it all back */ 4487 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4488 } 4489 4490 /** 4491 * ice_pack_ctx_qword - write a qword to a packed context structure 4492 * @src_ctx: unpacked source context structure 4493 * @dest_ctx: packed destination context data 4494 * @ce_info: context element description 4495 */ 4496 static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, 4497 const struct ice_ctx_ele *ce_info) 4498 { 4499 u64 src_qword, mask; 4500 __le64 dest_qword; 4501 u8 *from, *dest; 4502 u16 shift_width; 4503 4504 /* copy from the next struct field */ 4505 from = src_ctx + ce_info->offset; 4506 4507 /* prepare the bits and mask */ 4508 shift_width = ce_info->lsb % 8; 4509 mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); 4510 4511 /* don't swizzle the bits until after the mask because the mask bits 4512 * will be in a different bit position on big endian machines 4513 */ 4514 src_qword = *(u64 *)from; 4515 src_qword <<= shift_width; 4516 src_qword &= mask; 4517 4518 /* get the current bits from the target bit 
string */ 4519 dest = dest_ctx + (ce_info->lsb / 8); 4520 4521 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4522 4523 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4524 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4525 4526 /* put it all back */ 4527 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4528 } 4529 4530 /** 4531 * ice_set_ctx - set context bits in packed structure 4532 * @hw: pointer to the hardware structure 4533 * @src_ctx: pointer to a generic non-packed context structure 4534 * @dest_ctx: pointer to memory for the packed structure 4535 * @ce_info: List of Rx context elements 4536 */ 4537 int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4538 const struct ice_ctx_ele *ce_info) 4539 { 4540 int f; 4541 4542 for (f = 0; ce_info[f].width; f++) { 4543 /* We have to deal with each element of the FW response 4544 * using the correct size so that we are correct regardless 4545 * of the endianness of the machine. 4546 */ 4547 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4548 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", 4549 f, ce_info[f].width, ce_info[f].size_of); 4550 continue; 4551 } 4552 switch (ce_info[f].size_of) { 4553 case sizeof(u8): 4554 ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); 4555 break; 4556 case sizeof(u16): 4557 ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); 4558 break; 4559 case sizeof(u32): 4560 ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); 4561 break; 4562 case sizeof(u64): 4563 ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); 4564 break; 4565 default: 4566 return -EINVAL; 4567 } 4568 } 4569 4570 return 0; 4571 } 4572 4573 /** 4574 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4575 * @hw: pointer to the HW struct 4576 * @vsi_handle: software VSI handle 4577 * @tc: TC number 4578 * @q_handle: software queue handle 4579 */ 4580 struct ice_q_ctx * 4581 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4582 { 4583 struct ice_vsi_ctx *vsi; 4584 struct ice_q_ctx *q_ctx; 4585 4586 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4587 if (!vsi) 4588 return NULL; 4589 if (q_handle >= vsi->num_lan_q_entries[tc]) 4590 return NULL; 4591 if (!vsi->lan_q_ctx[tc]) 4592 return NULL; 4593 q_ctx = vsi->lan_q_ctx[tc]; 4594 return &q_ctx[q_handle]; 4595 } 4596 4597 /** 4598 * ice_ena_vsi_txq 4599 * @pi: port information structure 4600 * @vsi_handle: software VSI handle 4601 * @tc: TC number 4602 * @q_handle: software queue handle 4603 * @num_qgrps: Number of added queue groups 4604 * @buf: list of queue groups to be added 4605 * @buf_size: size of buffer for indirect command 4606 * @cd: pointer to command details structure or NULL 4607 * 4608 * This function adds one LAN queue 4609 */ 4610 int 4611 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4612 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4613 struct ice_sq_cd *cd) 4614 { 4615 struct ice_aqc_txsched_elem_data node = { 0 }; 4616 struct ice_sched_node *parent; 4617 struct ice_q_ctx *q_ctx; 4618 struct ice_hw *hw; 4619 int status; 4620 4621 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4622 return -EIO; 4623 4624 if (num_qgrps > 1 || buf->num_txqs > 1) 4625 return -ENOSPC; 4626 4627 hw = pi->hw; 4628 4629 if (!ice_is_vsi_valid(hw, vsi_handle)) 4630 return -EINVAL; 4631 4632 mutex_lock(&pi->sched_lock); 4633 4634 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4635 if 
(!q_ctx) { 4636 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4637 q_handle); 4638 status = -EINVAL; 4639 goto ena_txq_exit; 4640 } 4641 4642 /* find a parent node */ 4643 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4644 ICE_SCHED_NODE_OWNER_LAN); 4645 if (!parent) { 4646 status = -EINVAL; 4647 goto ena_txq_exit; 4648 } 4649 4650 buf->parent_teid = parent->info.node_teid; 4651 node.parent_teid = parent->info.node_teid; 4652 /* Mark the values in the "generic" section as valid. The default 4653 * value in the "generic" section is zero. This means that: 4654 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4655 * - Priority 0 among siblings, indicated by Bits 1-3. 4656 * - WFQ, indicated by Bit 4. 4657 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4658 * Bits 5-6. 4659 * - Bit 7 is reserved. 4660 * Without setting the generic section as valid in valid_sections, the 4661 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 4662 */ 4663 buf->txqs[0].info.valid_sections = 4664 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4665 ICE_AQC_ELEM_VALID_EIR; 4666 buf->txqs[0].info.generic = 0; 4667 buf->txqs[0].info.cir_bw.bw_profile_idx = 4668 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4669 buf->txqs[0].info.cir_bw.bw_alloc = 4670 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4671 buf->txqs[0].info.eir_bw.bw_profile_idx = 4672 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4673 buf->txqs[0].info.eir_bw.bw_alloc = 4674 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4675 4676 /* add the LAN queue */ 4677 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4678 if (status) { 4679 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4680 le16_to_cpu(buf->txqs[0].txq_id), 4681 hw->adminq.sq_last_status); 4682 goto ena_txq_exit; 4683 } 4684 4685 node.node_teid = buf->txqs[0].q_teid; 4686 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4687 q_ctx->q_handle = q_handle; 4688 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4689 4690 /* add a leaf node into scheduler tree queue layer */ 4691 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4692 if (!status) 4693 status = ice_sched_replay_q_bw(pi, q_ctx); 4694 4695 ena_txq_exit: 4696 mutex_unlock(&pi->sched_lock); 4697 return status; 4698 } 4699 4700 /** 4701 * ice_dis_vsi_txq 4702 * @pi: port information structure 4703 * @vsi_handle: software VSI handle 4704 * @tc: TC number 4705 * @num_queues: number of queues 4706 * @q_handles: pointer to software queue handle array 4707 * @q_ids: pointer to the q_id array 4708 * @q_teids: pointer to queue node teids 4709 * @rst_src: if called due to reset, specifies the reset source 4710 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4711 * @cd: pointer to command details structure or NULL 4712 * 4713 * This function removes queues and their corresponding nodes in SW DB 4714 */ 4715 int 4716 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4717 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4718 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4719 struct ice_sq_cd *cd) 4720 { 4721 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4722 u16 i, buf_size = __struct_size(qg_list); 4723 struct ice_q_ctx *q_ctx; 4724 int status = -ENOENT; 4725 struct ice_hw *hw; 4726 4727 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4728 return -EIO; 4729 4730 hw = pi->hw; 4731 4732 if (!num_queues) { 4733 /* if the queue is already disabled but the disable queue command 4734 *
has to be sent to complete the VF reset, then call 4735 * ice_aq_dis_lan_txq without any queue information 4736 */ 4737 if (rst_src) 4738 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4739 vmvf_num, NULL); 4740 return -EIO; 4741 } 4742 4743 mutex_lock(&pi->sched_lock); 4744 4745 for (i = 0; i < num_queues; i++) { 4746 struct ice_sched_node *node; 4747 4748 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4749 if (!node) 4750 continue; 4751 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4752 if (!q_ctx) { 4753 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4754 q_handles[i]); 4755 continue; 4756 } 4757 if (q_ctx->q_handle != q_handles[i]) { 4758 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4759 q_ctx->q_handle, q_handles[i]); 4760 continue; 4761 } 4762 qg_list->parent_teid = node->info.parent_teid; 4763 qg_list->num_qs = 1; 4764 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4765 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4766 vmvf_num, cd); 4767 4768 if (status) 4769 break; 4770 ice_free_sched_node(pi, node); 4771 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4772 q_ctx->q_teid = ICE_INVAL_TEID; 4773 } 4774 mutex_unlock(&pi->sched_lock); 4775 return status; 4776 } 4777 4778 /** 4779 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4780 * @pi: port information structure 4781 * @vsi_handle: software VSI handle 4782 * @tc_bitmap: TC bitmap 4783 * @maxqs: max queues array per TC 4784 * @owner: LAN or RDMA 4785 * 4786 * This function adds/updates the VSI queues per TC. 4787 */ 4788 static int 4789 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4790 u16 *maxqs, u8 owner) 4791 { 4792 int status = 0; 4793 u8 i; 4794 4795 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4796 return -EIO; 4797 4798 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4799 return -EINVAL; 4800 4801 mutex_lock(&pi->sched_lock); 4802 4803 ice_for_each_traffic_class(i) { 4804 /* configuration is possible only if TC node is present */ 4805 if (!ice_sched_get_tc_node(pi, i)) 4806 continue; 4807 4808 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4809 ice_is_tc_ena(tc_bitmap, i)); 4810 if (status) 4811 break; 4812 } 4813 4814 mutex_unlock(&pi->sched_lock); 4815 return status; 4816 } 4817 4818 /** 4819 * ice_cfg_vsi_lan - configure VSI LAN queues 4820 * @pi: port information structure 4821 * @vsi_handle: software VSI handle 4822 * @tc_bitmap: TC bitmap 4823 * @max_lanqs: max LAN queues array per TC 4824 * 4825 * This function adds/updates the VSI LAN queues per TC. 4826 */ 4827 int 4828 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4829 u16 *max_lanqs) 4830 { 4831 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4832 ICE_SCHED_NODE_OWNER_LAN); 4833 } 4834 4835 /** 4836 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4837 * @pi: port information structure 4838 * @vsi_handle: software VSI handle 4839 * @tc_bitmap: TC bitmap 4840 * @max_rdmaqs: max RDMA queues array per TC 4841 * 4842 * This function adds/updates the VSI RDMA queues per TC. 
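 *
 * Illustrative sketch (the array length macro and TC bitmap are
 * assumptions): to request one RDMA queue set on TC 0 only, a caller
 * could pass:
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 1 };
 *	int err = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);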
4843 */ 4844 int 4845 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4846 u16 *max_rdmaqs) 4847 { 4848 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4849 ICE_SCHED_NODE_OWNER_RDMA); 4850 } 4851 4852 /** 4853 * ice_ena_vsi_rdma_qset 4854 * @pi: port information structure 4855 * @vsi_handle: software VSI handle 4856 * @tc: TC number 4857 * @rdma_qset: pointer to RDMA Qset 4858 * @num_qsets: number of RDMA Qsets 4859 * @qset_teid: pointer to Qset node TEIDs 4860 * 4861 * This function adds RDMA Qset 4862 */ 4863 int 4864 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4865 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4866 { 4867 struct ice_aqc_txsched_elem_data node = { 0 }; 4868 struct ice_aqc_add_rdma_qset_data *buf; 4869 struct ice_sched_node *parent; 4870 struct ice_hw *hw; 4871 u16 i, buf_size; 4872 int ret; 4873 4874 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4875 return -EIO; 4876 hw = pi->hw; 4877 4878 if (!ice_is_vsi_valid(hw, vsi_handle)) 4879 return -EINVAL; 4880 4881 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4882 buf = kzalloc(buf_size, GFP_KERNEL); 4883 if (!buf) 4884 return -ENOMEM; 4885 mutex_lock(&pi->sched_lock); 4886 4887 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4888 ICE_SCHED_NODE_OWNER_RDMA); 4889 if (!parent) { 4890 ret = -EINVAL; 4891 goto rdma_error_exit; 4892 } 4893 buf->parent_teid = parent->info.node_teid; 4894 node.parent_teid = parent->info.node_teid; 4895 4896 buf->num_qsets = cpu_to_le16(num_qsets); 4897 for (i = 0; i < num_qsets; i++) { 4898 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4899 buf->rdma_qsets[i].info.valid_sections = 4900 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4901 ICE_AQC_ELEM_VALID_EIR; 4902 buf->rdma_qsets[i].info.generic = 0; 4903 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4904 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4905 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4906 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4907 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4908 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4909 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4910 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4911 } 4912 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4913 if (ret) { 4914 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4915 goto rdma_error_exit; 4916 } 4917 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4918 for (i = 0; i < num_qsets; i++) { 4919 node.node_teid = buf->rdma_qsets[i].qset_teid; 4920 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4921 &node, NULL); 4922 if (ret) 4923 break; 4924 qset_teid[i] = le32_to_cpu(node.node_teid); 4925 } 4926 rdma_error_exit: 4927 mutex_unlock(&pi->sched_lock); 4928 kfree(buf); 4929 return ret; 4930 } 4931 4932 /** 4933 * ice_dis_vsi_rdma_qset - free RDMA resources 4934 * @pi: port_info struct 4935 * @count: number of RDMA Qsets to free 4936 * @qset_teid: TEID of Qset node 4937 * @q_id: list of queue IDs being disabled 4938 */ 4939 int 4940 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4941 u16 *q_id) 4942 { 4943 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4944 u16 qg_size = __struct_size(qg_list); 4945 struct ice_hw *hw; 4946 int status = 0; 4947 int i; 4948 4949 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4950 return -EIO; 4951 4952 hw = pi->hw; 4953 4954 mutex_lock(&pi->sched_lock); 4955 4956 for (i = 0; i < count; i++) { 4957 struct ice_sched_node *node; 4958 4959 node = 
ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4960 if (!node) 4961 continue; 4962 4963 qg_list->parent_teid = node->info.parent_teid; 4964 qg_list->num_qs = 1; 4965 qg_list->q_id[0] = 4966 cpu_to_le16(q_id[i] | 4967 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4968 4969 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4970 ICE_NO_RESET, 0, NULL); 4971 if (status) 4972 break; 4973 4974 ice_free_sched_node(pi, node); 4975 } 4976 4977 mutex_unlock(&pi->sched_lock); 4978 return status; 4979 } 4980 4981 /** 4982 * ice_aq_get_cgu_abilities - get cgu abilities 4983 * @hw: pointer to the HW struct 4984 * @abilities: CGU abilities 4985 * 4986 * Get CGU abilities (0x0C61) 4987 * Return: 0 on success or negative value on failure. 4988 */ 4989 int 4990 ice_aq_get_cgu_abilities(struct ice_hw *hw, 4991 struct ice_aqc_get_cgu_abilities *abilities) 4992 { 4993 struct ice_aq_desc desc; 4994 4995 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 4996 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 4997 } 4998 4999 /** 5000 * ice_aq_set_input_pin_cfg - set input pin config 5001 * @hw: pointer to the HW struct 5002 * @input_idx: Input index 5003 * @flags1: Input flags 5004 * @flags2: Input flags 5005 * @freq: Frequency in Hz 5006 * @phase_delay: Delay in ps 5007 * 5008 * Set CGU input config (0x0C62) 5009 * Return: 0 on success or negative value on failure. 5010 */ 5011 int 5012 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 5013 u32 freq, s32 phase_delay) 5014 { 5015 struct ice_aqc_set_cgu_input_config *cmd; 5016 struct ice_aq_desc desc; 5017 5018 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 5019 cmd = &desc.params.set_cgu_input_config; 5020 cmd->input_idx = input_idx; 5021 cmd->flags1 = flags1; 5022 cmd->flags2 = flags2; 5023 cmd->freq = cpu_to_le32(freq); 5024 cmd->phase_delay = cpu_to_le32(phase_delay); 5025 5026 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5027 } 5028 5029 /** 5030 * ice_aq_get_input_pin_cfg - get input pin config 5031 * @hw: pointer to the HW struct 5032 * @input_idx: Input index 5033 * @status: Pin status 5034 * @type: Pin type 5035 * @flags1: Input flags 5036 * @flags2: Input flags 5037 * @freq: Frequency in Hz 5038 * @phase_delay: Delay in ps 5039 * 5040 * Get CGU input config (0x0C63) 5041 * Return: 0 on success or negative value on failure. 
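 *
 * All output pointers are optional and may be NULL, e.g. to read only
 * the configured frequency of input pin 0:
 *
 *	u32 freq;
 *	int err = ice_aq_get_input_pin_cfg(hw, 0, NULL, NULL, NULL, NULL,
 *					   &freq, NULL);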
5042 */ 5043 int 5044 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5045 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5046 { 5047 struct ice_aqc_get_cgu_input_config *cmd; 5048 struct ice_aq_desc desc; 5049 int ret; 5050 5051 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5052 cmd = &desc.params.get_cgu_input_config; 5053 cmd->input_idx = input_idx; 5054 5055 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5056 if (!ret) { 5057 if (status) 5058 *status = cmd->status; 5059 if (type) 5060 *type = cmd->type; 5061 if (flags1) 5062 *flags1 = cmd->flags1; 5063 if (flags2) 5064 *flags2 = cmd->flags2; 5065 if (freq) 5066 *freq = le32_to_cpu(cmd->freq); 5067 if (phase_delay) 5068 *phase_delay = le32_to_cpu(cmd->phase_delay); 5069 } 5070 5071 return ret; 5072 } 5073 5074 /** 5075 * ice_aq_set_output_pin_cfg - set output pin config 5076 * @hw: pointer to the HW struct 5077 * @output_idx: Output index 5078 * @flags: Output flags 5079 * @src_sel: Index of DPLL block 5080 * @freq: Output frequency 5081 * @phase_delay: Output phase compensation 5082 * 5083 * Set CGU output config (0x0C64) 5084 * Return: 0 on success or negative value on failure. 5085 */ 5086 int 5087 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5088 u8 src_sel, u32 freq, s32 phase_delay) 5089 { 5090 struct ice_aqc_set_cgu_output_config *cmd; 5091 struct ice_aq_desc desc; 5092 5093 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5094 cmd = &desc.params.set_cgu_output_config; 5095 cmd->output_idx = output_idx; 5096 cmd->flags = flags; 5097 cmd->src_sel = src_sel; 5098 cmd->freq = cpu_to_le32(freq); 5099 cmd->phase_delay = cpu_to_le32(phase_delay); 5100 5101 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5102 } 5103 5104 /** 5105 * ice_aq_get_output_pin_cfg - get output pin config 5106 * @hw: pointer to the HW struct 5107 * @output_idx: Output index 5108 * @flags: Output flags 5109 * @src_sel: Internal DPLL source 5110 * @freq: Output frequency 5111 * @src_freq: Source frequency 5112 * 5113 * Get CGU output config (0x0C65) 5114 * Return: 0 on success or negative value on failure. 5115 */ 5116 int 5117 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5118 u8 *src_sel, u32 *freq, u32 *src_freq) 5119 { 5120 struct ice_aqc_get_cgu_output_config *cmd; 5121 struct ice_aq_desc desc; 5122 int ret; 5123 5124 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5125 cmd = &desc.params.get_cgu_output_config; 5126 cmd->output_idx = output_idx; 5127 5128 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5129 if (!ret) { 5130 if (flags) 5131 *flags = cmd->flags; 5132 if (src_sel) 5133 *src_sel = cmd->src_sel; 5134 if (freq) 5135 *freq = le32_to_cpu(cmd->freq); 5136 if (src_freq) 5137 *src_freq = le32_to_cpu(cmd->src_freq); 5138 } 5139 5140 return ret; 5141 } 5142 5143 /** 5144 * ice_aq_get_cgu_dpll_status - get dpll status 5145 * @hw: pointer to the HW struct 5146 * @dpll_num: DPLL index 5147 * @ref_state: Reference clock state 5148 * @config: current DPLL config 5149 * @dpll_state: current DPLL state 5150 * @phase_offset: Phase offset in ns 5151 * @eec_mode: EEC_mode 5152 * 5153 * Get CGU DPLL status (0x0C66) 5154 * Return: 0 on success or negative value on failure. 
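 *
 * The phase offset is returned by firmware as two 32-bit halves and is
 * reassembled below into a sign-extended 48-bit value, conceptually:
 *
 *	offset = ((s64)phase_offset_h << 32) | phase_offset_l;
 *	offset = sign_extend64(offset, 47);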
5155 */ 5156 int 5157 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5158 u8 *dpll_state, u8 *config, s64 *phase_offset, 5159 u8 *eec_mode) 5160 { 5161 struct ice_aqc_get_cgu_dpll_status *cmd; 5162 struct ice_aq_desc desc; 5163 int status; 5164 5165 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5166 cmd = &desc.params.get_cgu_dpll_status; 5167 cmd->dpll_num = dpll_num; 5168 5169 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5170 if (!status) { 5171 *ref_state = cmd->ref_state; 5172 *dpll_state = cmd->dpll_state; 5173 *config = cmd->config; 5174 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5175 *phase_offset <<= 32; 5176 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5177 *phase_offset = sign_extend64(*phase_offset, 47); 5178 *eec_mode = cmd->eec_mode; 5179 } 5180 5181 return status; 5182 } 5183 5184 /** 5185 * ice_aq_set_cgu_dpll_config - set dpll config 5186 * @hw: pointer to the HW struct 5187 * @dpll_num: DPLL index 5188 * @ref_state: Reference clock state 5189 * @config: DPLL config 5190 * @eec_mode: EEC mode 5191 * 5192 * Set CGU DPLL config (0x0C67) 5193 * Return: 0 on success or negative value on failure. 5194 */ 5195 int 5196 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5197 u8 config, u8 eec_mode) 5198 { 5199 struct ice_aqc_set_cgu_dpll_config *cmd; 5200 struct ice_aq_desc desc; 5201 5202 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5203 cmd = &desc.params.set_cgu_dpll_config; 5204 cmd->dpll_num = dpll_num; 5205 cmd->ref_state = ref_state; 5206 cmd->config = config; 5207 cmd->eec_mode = eec_mode; 5208 5209 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5210 } 5211 5212 /** 5213 * ice_aq_set_cgu_ref_prio - set input reference priority 5214 * @hw: pointer to the HW struct 5215 * @dpll_num: DPLL index 5216 * @ref_idx: Reference pin index 5217 * @ref_priority: Reference input priority 5218 * 5219 * Set CGU reference priority (0x0C68) 5220 * Return: 0 on success or negative value on failure. 5221 */ 5222 int 5223 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5224 u8 ref_priority) 5225 { 5226 struct ice_aqc_set_cgu_ref_prio *cmd; 5227 struct ice_aq_desc desc; 5228 5229 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5230 cmd = &desc.params.set_cgu_ref_prio; 5231 cmd->dpll_num = dpll_num; 5232 cmd->ref_idx = ref_idx; 5233 cmd->ref_priority = ref_priority; 5234 5235 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5236 } 5237 5238 /** 5239 * ice_aq_get_cgu_ref_prio - get input reference priority 5240 * @hw: pointer to the HW struct 5241 * @dpll_num: DPLL index 5242 * @ref_idx: Reference pin index 5243 * @ref_prio: Reference input priority 5244 * 5245 * Get CGU reference priority (0x0C69) 5246 * Return: 0 on success or negative value on failure. 
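 *
 * Read-side counterpart of ice_aq_set_cgu_ref_prio(); on success,
 * *ref_prio holds the priority firmware currently assigns to @ref_idx
 * on DPLL @dpll_num.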
5247 */ 5248 int 5249 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5250 u8 *ref_prio) 5251 { 5252 struct ice_aqc_get_cgu_ref_prio *cmd; 5253 struct ice_aq_desc desc; 5254 int status; 5255 5256 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); 5257 cmd = &desc.params.get_cgu_ref_prio; 5258 cmd->dpll_num = dpll_num; 5259 cmd->ref_idx = ref_idx; 5260 5261 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5262 if (!status) 5263 *ref_prio = cmd->ref_priority; 5264 5265 return status; 5266 } 5267 5268 /** 5269 * ice_aq_get_cgu_info - get cgu info 5270 * @hw: pointer to the HW struct 5271 * @cgu_id: CGU ID 5272 * @cgu_cfg_ver: CGU config version 5273 * @cgu_fw_ver: CGU firmware version 5274 * 5275 * Get CGU info (0x0C6A) 5276 * Return: 0 on success or negative value on failure. 5277 */ 5278 int 5279 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, 5280 u32 *cgu_fw_ver) 5281 { 5282 struct ice_aqc_get_cgu_info *cmd; 5283 struct ice_aq_desc desc; 5284 int status; 5285 5286 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); 5287 cmd = &desc.params.get_cgu_info; 5288 5289 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5290 if (!status) { 5291 *cgu_id = le32_to_cpu(cmd->cgu_id); 5292 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); 5293 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); 5294 } 5295 5296 return status; 5297 } 5298 5299 /** 5300 * ice_aq_set_phy_rec_clk_out - set RCLK phy out 5301 * @hw: pointer to the HW struct 5302 * @phy_output: PHY reference clock output pin 5303 * @enable: GPIO state to be applied 5304 * @freq: PHY output frequency 5305 * 5306 * Set phy recovered clock as reference (0x0630) 5307 * Return: 0 on success or negative value on failure. 5308 */ 5309 int 5310 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, 5311 u32 *freq) 5312 { 5313 struct ice_aqc_set_phy_rec_clk_out *cmd; 5314 struct ice_aq_desc desc; 5315 int status; 5316 5317 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); 5318 cmd = &desc.params.set_phy_rec_clk_out; 5319 cmd->phy_output = phy_output; 5320 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; 5321 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; 5322 cmd->freq = cpu_to_le32(*freq); 5323 5324 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5325 if (!status) 5326 *freq = le32_to_cpu(cmd->freq); 5327 5328 return status; 5329 } 5330 5331 /** 5332 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info 5333 * @hw: pointer to the HW struct 5334 * @phy_output: PHY reference clock output pin 5335 * @port_num: Port number 5336 * @flags: PHY flags 5337 * @node_handle: PHY node handle 5338 * 5339 * Get PHY recovered clock output info (0x0631) 5340 * Return: 0 on success or negative value on failure.
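 *
 * @phy_output is used as both input and output: the caller selects the
 * pin to query and firmware echoes back the pin number it reported on.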
5341 */ 5342 int 5343 ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, 5344 u8 *flags, u16 *node_handle) 5345 { 5346 struct ice_aqc_get_phy_rec_clk_out *cmd; 5347 struct ice_aq_desc desc; 5348 int status; 5349 5350 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); 5351 cmd = &desc.params.get_phy_rec_clk_out; 5352 cmd->phy_output = *phy_output; 5353 5354 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5355 if (!status) { 5356 *phy_output = cmd->phy_output; 5357 if (port_num) 5358 *port_num = cmd->port_num; 5359 if (flags) 5360 *flags = cmd->flags; 5361 if (node_handle) 5362 *node_handle = le16_to_cpu(cmd->node_handle); 5363 } 5364 5365 return status; 5366 } 5367 5368 /** 5369 * ice_aq_get_sensor_reading 5370 * @hw: pointer to the HW struct 5371 * @data: pointer to data to be read from the sensor 5372 * 5373 * Get sensor reading (0x0632) 5374 */ 5375 int ice_aq_get_sensor_reading(struct ice_hw *hw, 5376 struct ice_aqc_get_sensor_reading_resp *data) 5377 { 5378 struct ice_aqc_get_sensor_reading *cmd; 5379 struct ice_aq_desc desc; 5380 int status; 5381 5382 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); 5383 cmd = &desc.params.get_sensor_reading; 5384 #define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 5385 #define ICE_INTERNAL_TEMP_SENSOR 0 5386 cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; 5387 cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; 5388 5389 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5390 if (!status) 5391 memcpy(data, &desc.params.get_sensor_reading_resp, 5392 sizeof(*data)); 5393 5394 return status; 5395 } 5396 5397 /** 5398 * ice_replay_pre_init - replay pre initialization 5399 * @hw: pointer to the HW struct 5400 * 5401 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 5402 */ 5403 static int ice_replay_pre_init(struct ice_hw *hw) 5404 { 5405 struct ice_switch_info *sw = hw->switch_info; 5406 u8 i; 5407 5408 /* Delete old entries from replay filter list head if there is any */ 5409 ice_rm_all_sw_replay_rule_info(hw); 5410 /* In start of replay, move entries into replay_rules list, it 5411 * will allow adding rules entries back to filt_rules list, 5412 * which is operational list. 5413 */ 5414 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) 5415 list_replace_init(&sw->recp_list[i].filt_rules, 5416 &sw->recp_list[i].filt_replay_rules); 5417 ice_sched_replay_agg_vsi_preinit(hw); 5418 5419 return 0; 5420 } 5421 5422 /** 5423 * ice_replay_vsi - replay VSI configuration 5424 * @hw: pointer to the HW struct 5425 * @vsi_handle: driver VSI handle 5426 * 5427 * Restore all VSI configuration after reset. It is required to call this 5428 * function with main VSI first. 5429 */ 5430 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 5431 { 5432 int status; 5433 5434 if (!ice_is_vsi_valid(hw, vsi_handle)) 5435 return -EINVAL; 5436 5437 /* Replay pre-initialization if there is any */ 5438 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 5439 status = ice_replay_pre_init(hw); 5440 if (status) 5441 return status; 5442 } 5443 /* Replay per VSI all RSS configurations */ 5444 status = ice_replay_rss_cfg(hw, vsi_handle); 5445 if (status) 5446 return status; 5447 /* Replay per VSI all filters */ 5448 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 5449 if (!status) 5450 status = ice_replay_vsi_agg(hw, vsi_handle); 5451 return status; 5452 } 5453 5454 /** 5455 * ice_replay_post - post replay configuration cleanup 5456 * @hw: pointer to the HW struct 5457 * 5458 * Post replay cleanup. 
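 *
 * Meant to run once, after every VSI has been replayed with
 * ice_replay_vsi(): it drops the saved pre-reset filter rules and
 * replays the aggregator scheduler configuration.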

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* Device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* Device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
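
/* Illustrative sketch only, not part of the driver: how the 40-bit
 * roll-over arithmetic above behaves. With a previous raw reading near the
 * top of the 40-bit range and a new raw reading that has wrapped past zero,
 * the accumulated software counter still advances by the true delta. All
 * values below are made up.
 */
static u64 __maybe_unused ice_example_stat_rollover(void)
{
	u64 prev_stat = BIT_ULL(40) - 5;	/* last raw read: 5 from wrap */
	u64 new_data = 10;			/* raw counter wrapped past 0 */
	u64 cur_stat = 1000;			/* accumulated software stat */

	/* Same roll-over branch as ice_stat_update40(): delta is 15 */
	if (new_data >= prev_stat)
		cur_stat += new_data - prev_stat;
	else
		cur_stat += (new_data + BIT_ULL(40)) - prev_stat;

	return cur_stat;	/* 1015: advanced by the true delta */
}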

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to hold the element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read data from an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c - write data to an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *	    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
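
/* Illustrative sketch only, not part of the driver: one plausible way to
 * build the @params byte and read two bytes from a device. The bus address
 * is hypothetical, and the other param bits are left at zero (no offset
 * bytes, so @addr is unused here); ICE_AQC_I2C_DATA_SIZE_M is the same mask
 * the functions above decode with FIELD_GET().
 */
static int __maybe_unused
ice_example_i2c_read2(struct ice_hw *hw,
		      struct ice_aqc_link_topo_addr topo_addr, u8 *buf)
{
	/* bits [3:0] carry the byte count to read */
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);

	return ice_aq_read_i2c(hw, topo_addr, 0x50 /* hypothetical */,
			       cpu_to_le16(0), params, buf, NULL);
}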

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver - check firmware API version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least maj.min.patch
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
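
/* Illustrative sketch only, not part of the driver: read a topology GPIO
 * and write back its inverse. The pin index is hypothetical, and real
 * callers obtain @gpio_ctrl_handle from the link topology AQ commands.
 */
static int __maybe_unused ice_example_toggle_gpio(struct ice_hw *hw,
						  u16 gpio_ctrl_handle)
{
	bool value;
	int err;

	err = ice_aq_get_gpio(hw, gpio_ctrl_handle, 0, &value, NULL);
	if (err)
		return err;

	return ice_aq_set_gpio(hw, gpio_ctrl_handle, 0, !value, NULL);
}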

/**
 * ice_get_link_default_override - get link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
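
/* Illustrative sketch only, not part of the driver: the word-assembly
 * pattern used above, in isolation. Four little-endian 16-bit NVM words,
 * read lowest-word-first, are shifted into place to rebuild a 64-bit PHY
 * type mask. The words[] values are made up.
 */
static u64 __maybe_unused ice_example_assemble_phy_type(void)
{
	const u16 words[] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
	u64 phy_type = 0;
	u32 i;

	/* word i contributes bits [16 * i + 15 : 16 * i] */
	for (i = 0; i < ARRAY_SIZE(words); i++)
		phy_type |= ((u64)words[i] << (i * 16));

	return phy_type;	/* 0xdef09abc56781234 */
}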

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB (0x0A08).
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
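
/* Illustrative sketch only, not part of the driver: gate the LLDP Rx filter
 * on the firmware capability check before trying to install it, since the
 * filter control AQ command is only available on E810 firmware that
 * advertises a sufficient API version.
 */
static int __maybe_unused ice_example_add_lldp_fltr(struct ice_hw *hw,
						    u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}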

/* Each index into the following array matches the speed of a return value
 * from the list of AQ returned speeds, in the range ICE_AQ_LINK_SPEED_10MB ..
 * ICE_AQ_LINK_SPEED_200GB, excluding ICE_AQ_LINK_SPEED_UNKNOWN (BIT(15)).
 * The link_speed returned by the firmware is a 16 bit value, and the array
 * is indexed by [fls(speed) - 1]; indexes past the end of the array map to
 * an unknown speed of 0 in ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
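
/* Illustrative sketch only, not part of the driver: convert a raw AQ
 * link_speed bitfield to an integer Mb/s value. fls() turns the single set
 * speed bit into the [fls(speed) - 1] index the table expects; a zero or
 * unknown speed falls outside the table and yields 0.
 */
static u32 __maybe_unused ice_example_speed_to_mbps(u16 link_speed)
{
	if (!link_speed)
		return 0;

	return ice_get_link_speed(fls(link_speed) - 1);
}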