// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
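/*
 * Example (illustrative only): given the tables above, a phy_type_low value
 * of 0x1000 has only bit 12 set, so ice_dump_phy_type() would emit a single
 * "bit(12): 10GBASE_T" line under the caller-supplied prefix.
 */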
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830_BACKPLANE:
	case ICE_DEV_ID_E830_QSFP56:
	case ICE_DEV_ID_E830_SFP:
	case ICE_DEV_ID_E830_SFP_DD:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}
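/*
 * Usage sketch (illustrative only): callers generally branch on the MAC
 * family helpers above instead of raw device IDs, e.g.:
 *
 *	if (ice_is_e810(hw))
 *		take_e810_path();
 *	else if (ice_is_generic_mac(hw))
 *		take_e82x_path();	// generic MACs also expose an SBQ
 *
 * take_e810_path()/take_e82x_path() are hypothetical placeholders.
 */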
/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
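/*
 * Note: ice_clear_pf_cfg() above is the minimal template for a direct
 * (buffer-less) admin queue command: fill a default descriptor with the
 * opcode via ice_fill_dflt_direct_cmd_desc() and send it through
 * ice_aq_send_cmd() with a NULL buffer and zero length.
 */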
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The MAC addresses from the response are stored
 * in the HW struct (port.mac).
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
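/*
 * Usage sketch (illustrative only), mirroring the call made later in
 * ice_init_hw(): the caps buffer is caller-allocated and report_mode selects
 * which view of the PHY the firmware reports:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps &&
 *	    !ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				 pcaps, NULL))
 *		use_phy_caps(pcaps);	// hypothetical consumer
 *	kfree(pcaps);
 */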
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if a cage is present. If the AQC
 * returns an error (ENOENT), then no cage is present. If no cage is present,
 * then the connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
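/*
 * Usage sketch (illustrative only; node_type, part_num and use_node() are
 * hypothetical placeholders): scan the netlist for a node and keep its
 * handle on success:
 *
 *	u16 handle;
 *
 *	if (!ice_find_netlist_node(hw, node_type, part_num, &handle))
 *		use_node(handle);	// handle is valid only on return 0
 */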
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if a cage is present. If
	 * the AQC returns an error (ENOENT), then no cage is present. If no
	 * cage is present, then the connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
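/*
 * Example (illustrative): a link reporting only ICE_PHY_TYPE_LOW_10GBASE_T
 * resolves to ICE_MEDIA_BASET above, while the AUI/CAUI C2C types map to
 * ICE_MEDIA_DA only when a media cage is detected and otherwise fall
 * through to ICE_MEDIA_BACKPLANE.
 */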
/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
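/*
 * Usage sketch (illustrative only): refresh the cached link state and enable
 * Link Status Event reporting in one call; pass a non-NULL link pointer only
 * if the caller wants its own copy of the result:
 *
 *	struct ice_link_status link;
 *
 *	if (!ice_aq_get_link_info(pi, true, &link, NULL))
 *		use_link_status(&link);	// pi->phy.link_info is updated too
 *
 * use_link_status() is a hypothetical consumer.
 */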
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	ice_init_chk_recipe_reuse_support(hw);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
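/*
 * Pairing note (illustrative): ice_init_hw() and ice_deinit_hw() bracket
 * nominal operation:
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;	// ice_init_hw() already unrolled itself
 *	...
 *	ice_deinit_hw(hw);
 */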
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	/* Check against the full loop bound so a timeout is actually
	 * detected; the loop above counts to the sum of both timeouts.
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
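/*
 * Usage sketch (illustrative only; the field values and the base-address
 * shift are assumptions): fill the sparse context and let ice_write_rxq_ctx()
 * pack and program it:
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;	// assumed 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		handle_qctx_error();	// hypothetical error path
 */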
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
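/*
 * Usage sketch (illustrative only; the destination device and address are
 * hypothetical placeholders): a zero opcode requests a read, so the value
 * comes back in in->data on success:
 *
 *	struct ice_sbq_msg_input in = {};
 *
 *	in.dest_dev = dest;		// hypothetical sideband target
 *	in.msg_addr_low = 0x1234;	// hypothetical register offset
 *	in.opcode = 0;			// 0 = read, non-zero = write
 *	if (!ice_sbq_rw_reg(hw, &in))
 *		use_value(in.data);	// hypothetical consumer
 */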
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0 -         acquired lock, and can perform download package
 * 2) -EIO -      did not get lock, driver should fail to load
 * 3) -EALREADY - did not get lock, but another driver has
 *                successfully downloaded the package; the driver does
 *                not have to download the package and can continue
 *                loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
1793 	 */
1794 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1795 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1796 			*timeout = le32_to_cpu(cmd_resp->timeout);
1797 			return 0;
1798 		} else if (le16_to_cpu(cmd_resp->status) ==
1799 			   ICE_AQ_RES_GLBL_IN_PROG) {
1800 			*timeout = le32_to_cpu(cmd_resp->timeout);
1801 			return -EIO;
1802 		} else if (le16_to_cpu(cmd_resp->status) ==
1803 			   ICE_AQ_RES_GLBL_DONE) {
1804 			return -EALREADY;
1805 		}
1806 
1807 		/* invalid FW response, force a timeout immediately */
1808 		*timeout = 0;
1809 		return -EIO;
1810 	}
1811 
1812 	/* If the resource is held by some other driver, the command completes
1813 	 * with a busy return value and the timeout field indicates the maximum
1814 	 * time the current owner of the resource has to free it.
1815 	 */
1816 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1817 		*timeout = le32_to_cpu(cmd_resp->timeout);
1818 
1819 	return status;
1820 }
1821 
1822 /**
1823  * ice_aq_release_res
1824  * @hw: pointer to the HW struct
1825  * @res: resource ID
1826  * @sdp_number: resource number
1827  * @cd: pointer to command details structure or NULL
1828  *
1829  * Release a common resource using the admin queue commands (0x0009).
1830  */
1831 static int
1832 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1833 		   struct ice_sq_cd *cd)
1834 {
1835 	struct ice_aqc_req_res *cmd;
1836 	struct ice_aq_desc desc;
1837 
1838 	cmd = &desc.params.res_owner;
1839 
1840 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1841 
1842 	cmd->res_id = cpu_to_le16(res);
1843 	cmd->res_number = cpu_to_le32(sdp_number);
1844 
1845 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1846 }
1847 
1848 /**
1849  * ice_acquire_res
1850  * @hw: pointer to the HW structure
1851  * @res: resource ID
1852  * @access: access type (read or write)
1853  * @timeout: timeout in milliseconds
1854  *
1855  * This function will attempt to acquire the ownership of a resource.
1856  */
1857 int
1858 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1859 		enum ice_aq_res_access_type access, u32 timeout)
1860 {
1861 #define ICE_RES_POLLING_DELAY_MS	10
1862 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1863 	u32 time_left = timeout;
1864 	int status;
1865 
1866 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1867 
1868 	/* A return code of -EALREADY means that another driver has
1869 	 * previously acquired the resource and performed any necessary updates;
1870 	 * in this case the caller does not obtain the resource and has no
1871 	 * further work to do.
1872 	 */
1873 	if (status == -EALREADY)
1874 		goto ice_acquire_res_exit;
1875 
1876 	if (status)
1877 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1878 
1879 	/* If necessary, poll until the current lock owner times out */
1880 	timeout = time_left;
1881 	while (status && timeout && time_left) {
1882 		mdelay(delay);
1883 		timeout = (timeout > delay) ?
timeout - delay : 0; 1884 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1885 1886 if (status == -EALREADY) 1887 /* lock free, but no work to do */ 1888 break; 1889 1890 if (!status) 1891 /* lock acquired */ 1892 break; 1893 } 1894 if (status && status != -EALREADY) 1895 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1896 1897 ice_acquire_res_exit: 1898 if (status == -EALREADY) { 1899 if (access == ICE_RES_WRITE) 1900 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1901 else 1902 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1903 } 1904 return status; 1905 } 1906 1907 /** 1908 * ice_release_res 1909 * @hw: pointer to the HW structure 1910 * @res: resource ID 1911 * 1912 * This function will release a resource using the proper Admin Command. 1913 */ 1914 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1915 { 1916 unsigned long timeout; 1917 int status; 1918 1919 /* there are some rare cases when trying to release the resource 1920 * results in an admin queue timeout, so handle them correctly 1921 */ 1922 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1923 do { 1924 status = ice_aq_release_res(hw, res, 0, NULL); 1925 if (status != -EIO) 1926 break; 1927 usleep_range(1000, 2000); 1928 } while (time_before(jiffies, timeout)); 1929 } 1930 1931 /** 1932 * ice_aq_alloc_free_res - command to allocate/free resources 1933 * @hw: pointer to the HW struct 1934 * @buf: Indirect buffer to hold data parameters and response 1935 * @buf_size: size of buffer for indirect commands 1936 * @opc: pass in the command opcode 1937 * 1938 * Helper function to allocate/free resources using the admin queue commands 1939 */ 1940 int ice_aq_alloc_free_res(struct ice_hw *hw, 1941 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1942 enum ice_adminq_opc opc) 1943 { 1944 struct ice_aqc_alloc_free_res_cmd *cmd; 1945 struct ice_aq_desc desc; 1946 1947 cmd = &desc.params.sw_res_ctrl; 1948 1949 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1950 return -EINVAL; 1951 1952 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1953 1954 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1955 1956 cmd->num_entries = cpu_to_le16(1); 1957 1958 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1959 } 1960 1961 /** 1962 * ice_alloc_hw_res - allocate resource 1963 * @hw: pointer to the HW struct 1964 * @type: type of resource 1965 * @num: number of resources to allocate 1966 * @btm: allocate from bottom 1967 * @res: pointer to array that will receive the resources 1968 */ 1969 int 1970 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 1971 { 1972 struct ice_aqc_alloc_free_res_elem *buf; 1973 u16 buf_len; 1974 int status; 1975 1976 buf_len = struct_size(buf, elem, num); 1977 buf = kzalloc(buf_len, GFP_KERNEL); 1978 if (!buf) 1979 return -ENOMEM; 1980 1981 /* Prepare buffer to allocate resource. 
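	 * The indirect buffer is a struct ice_aqc_alloc_free_res_elem header
	 * followed by one element per requested resource; on success firmware
	 * writes the allocated resource IDs into buf->elem, which is copied
	 * out to the caller's res array below.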
*/ 1982 buf->num_elems = cpu_to_le16(num); 1983 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1984 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1985 if (btm) 1986 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1987 1988 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 1989 if (status) 1990 goto ice_alloc_res_exit; 1991 1992 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 1993 1994 ice_alloc_res_exit: 1995 kfree(buf); 1996 return status; 1997 } 1998 1999 /** 2000 * ice_free_hw_res - free allocated HW resource 2001 * @hw: pointer to the HW struct 2002 * @type: type of resource to free 2003 * @num: number of resources 2004 * @res: pointer to array that contains the resources to free 2005 */ 2006 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2007 { 2008 struct ice_aqc_alloc_free_res_elem *buf; 2009 u16 buf_len; 2010 int status; 2011 2012 buf_len = struct_size(buf, elem, num); 2013 buf = kzalloc(buf_len, GFP_KERNEL); 2014 if (!buf) 2015 return -ENOMEM; 2016 2017 /* Prepare buffer to free resource. */ 2018 buf->num_elems = cpu_to_le16(num); 2019 buf->res_type = cpu_to_le16(type); 2020 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2021 2022 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2023 if (status) 2024 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2025 2026 kfree(buf); 2027 return status; 2028 } 2029 2030 /** 2031 * ice_get_num_per_func - determine number of resources per PF 2032 * @hw: pointer to the HW structure 2033 * @max: value to be evenly split between each PF 2034 * 2035 * Determine the number of valid functions by going through the bitmap returned 2036 * from parsing capabilities and use this to calculate the number of resources 2037 * per PF based on the max value passed in. 2038 */ 2039 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2040 { 2041 u8 funcs; 2042 2043 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2044 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2045 ICE_CAPS_VALID_FUNCS_M); 2046 2047 if (!funcs) 2048 return 0; 2049 2050 return max / funcs; 2051 } 2052 2053 /** 2054 * ice_parse_common_caps - parse common device/function capabilities 2055 * @hw: pointer to the HW struct 2056 * @caps: pointer to common capabilities structure 2057 * @elem: the capability element to parse 2058 * @prefix: message prefix for tracing capabilities 2059 * 2060 * Given a capability element, extract relevant details into the common 2061 * capability structure. 2062 * 2063 * Returns: true if the capability matches one of the common capability ids, 2064 * false otherwise. 
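 *
 * ice_parse_func_caps() and ice_parse_dev_caps() call this helper for
 * every element first, then decode the function- or device-specific
 * capability IDs in their own switch statements.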
2065 */ 2066 static bool 2067 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2068 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2069 { 2070 u32 logical_id = le32_to_cpu(elem->logical_id); 2071 u32 phys_id = le32_to_cpu(elem->phys_id); 2072 u32 number = le32_to_cpu(elem->number); 2073 u16 cap = le16_to_cpu(elem->cap); 2074 bool found = true; 2075 2076 switch (cap) { 2077 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2078 caps->valid_functions = number; 2079 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2080 caps->valid_functions); 2081 break; 2082 case ICE_AQC_CAPS_SRIOV: 2083 caps->sr_iov_1_1 = (number == 1); 2084 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2085 caps->sr_iov_1_1); 2086 break; 2087 case ICE_AQC_CAPS_DCB: 2088 caps->dcb = (number == 1); 2089 caps->active_tc_bitmap = logical_id; 2090 caps->maxtc = phys_id; 2091 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2092 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2093 caps->active_tc_bitmap); 2094 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2095 break; 2096 case ICE_AQC_CAPS_RSS: 2097 caps->rss_table_size = number; 2098 caps->rss_table_entry_width = logical_id; 2099 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2100 caps->rss_table_size); 2101 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2102 caps->rss_table_entry_width); 2103 break; 2104 case ICE_AQC_CAPS_RXQS: 2105 caps->num_rxq = number; 2106 caps->rxq_first_id = phys_id; 2107 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2108 caps->num_rxq); 2109 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2110 caps->rxq_first_id); 2111 break; 2112 case ICE_AQC_CAPS_TXQS: 2113 caps->num_txq = number; 2114 caps->txq_first_id = phys_id; 2115 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2116 caps->num_txq); 2117 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2118 caps->txq_first_id); 2119 break; 2120 case ICE_AQC_CAPS_MSIX: 2121 caps->num_msix_vectors = number; 2122 caps->msix_vector_first_id = phys_id; 2123 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2124 caps->num_msix_vectors); 2125 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2126 caps->msix_vector_first_id); 2127 break; 2128 case ICE_AQC_CAPS_PENDING_NVM_VER: 2129 caps->nvm_update_pending_nvm = true; 2130 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2131 break; 2132 case ICE_AQC_CAPS_PENDING_OROM_VER: 2133 caps->nvm_update_pending_orom = true; 2134 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2135 break; 2136 case ICE_AQC_CAPS_PENDING_NET_VER: 2137 caps->nvm_update_pending_netlist = true; 2138 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2139 break; 2140 case ICE_AQC_CAPS_NVM_MGMT: 2141 caps->nvm_unified_update = 2142 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2143 true : false; 2144 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2145 caps->nvm_unified_update); 2146 break; 2147 case ICE_AQC_CAPS_RDMA: 2148 caps->rdma = (number == 1); 2149 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2150 break; 2151 case ICE_AQC_CAPS_MAX_MTU: 2152 caps->max_mtu = number; 2153 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2154 prefix, caps->max_mtu); 2155 break; 2156 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2157 caps->pcie_reset_avoidance = (number > 0); 2158 ice_debug(hw, ICE_DBG_INIT, 2159 "%s: pcie_reset_avoidance = %d\n", prefix, 2160 caps->pcie_reset_avoidance); 2161 break; 2162 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2163 caps->reset_restrict_support = (number == 1); 2164 ice_debug(hw, ICE_DBG_INIT, 2165 "%s: reset_restrict_support = %d\n", prefix, 2166 caps->reset_restrict_support); 2167 break; 2168 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2169 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2170 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2171 prefix, caps->roce_lag); 2172 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2173 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2174 prefix, caps->sriov_lag); 2175 break; 2176 default: 2177 /* Not one of the recognized common capabilities */ 2178 found = false; 2179 } 2180 2181 return found; 2182 } 2183 2184 /** 2185 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2186 * @hw: pointer to the HW structure 2187 * @caps: pointer to capabilities structure to fix 2188 * 2189 * Re-calculate the capabilities that are dependent on the number of physical 2190 * ports; i.e. some features are not supported or function differently on 2191 * devices with more than 4 ports. 2192 */ 2193 static void 2194 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2195 { 2196 /* This assumes device capabilities are always scanned before function 2197 * capabilities during the initialization flow. 2198 */ 2199 if (hw->dev_caps.num_funcs > 4) { 2200 /* Max 4 TCs per port */ 2201 caps->maxtc = 4; 2202 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2203 caps->maxtc); 2204 if (caps->rdma) { 2205 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2206 caps->rdma = 0; 2207 } 2208 2209 /* print message only when processing device capabilities 2210 * during initialization. 2211 */ 2212 if (caps == &hw->dev_caps.common_cap) 2213 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2214 } 2215 } 2216 2217 /** 2218 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2219 * @hw: pointer to the HW struct 2220 * @func_p: pointer to function capabilities structure 2221 * @cap: pointer to the capability element to parse 2222 * 2223 * Extract function capabilities for ICE_AQC_CAPS_VF. 
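 *
 * The element's "number" word carries the count of VFs allocated to this
 * function and "logical_id" the first VF ID, as decoded below.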
2224  */
2225 static void
2226 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2227 		       struct ice_aqc_list_caps_elem *cap)
2228 {
2229 	u32 logical_id = le32_to_cpu(cap->logical_id);
2230 	u32 number = le32_to_cpu(cap->number);
2231 
2232 	func_p->num_allocd_vfs = number;
2233 	func_p->vf_base_id = logical_id;
2234 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2235 		  func_p->num_allocd_vfs);
2236 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2237 		  func_p->vf_base_id);
2238 }
2239 
2240 /**
2241  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2242  * @hw: pointer to the HW struct
2243  * @func_p: pointer to function capabilities structure
2244  * @cap: pointer to the capability element to parse
2245  *
2246  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2247  */
2248 static void
2249 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2250 			struct ice_aqc_list_caps_elem *cap)
2251 {
2252 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2253 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2254 		  le32_to_cpu(cap->number));
2255 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2256 		  func_p->guar_num_vsi);
2257 }
2258 
2259 /**
2260  * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2261  * @hw: pointer to the HW struct
2262  * @func_p: pointer to function capabilities structure
2263  * @cap: pointer to the capability element to parse
2264  *
2265  * Extract function capabilities for ICE_AQC_CAPS_1588.
2266  */
2267 static void
2268 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2269 			 struct ice_aqc_list_caps_elem *cap)
2270 {
2271 	struct ice_ts_func_info *info = &func_p->ts_func_info;
2272 	u32 number = le32_to_cpu(cap->number);
2273 
2274 	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2275 	func_p->common_cap.ieee_1588 = info->ena;
2276 
2277 	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2278 	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2279 	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2280 	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2281 
2282 	info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
2283 	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2284 
2285 	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2286 		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2287 	} else {
2288 		/* Unknown clock frequency, so assume a (probably incorrect)
2289 		 * default to avoid out-of-bounds lookups of frequency-related
2290 		 * information.
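		 * (ICE_TIME_REF_FREQ_25_000 corresponds to a 25.000 MHz time
		 * reference.)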
2291 		 */
2292 		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2293 			  info->clk_freq);
2294 		info->time_ref = ICE_TIME_REF_FREQ_25_000;
2295 	}
2296 
2297 	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2298 		  func_p->common_cap.ieee_1588);
2299 	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2300 		  info->src_tmr_owned);
2301 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2302 		  info->tmr_ena);
2303 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2304 		  info->tmr_index_owned);
2305 	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2306 		  info->tmr_index_assoc);
2307 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2308 		  info->clk_freq);
2309 	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2310 		  info->clk_src);
2311 }
2312 
2313 /**
2314  * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2315  * @hw: pointer to the HW struct
2316  * @func_p: pointer to function capabilities structure
2317  *
2318  * Extract function capabilities for ICE_AQC_CAPS_FD.
2319  */
2320 static void
2321 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2322 {
2323 	u32 reg_val, gsize, bsize;
2324 
2325 	reg_val = rd32(hw, GLQF_FD_SIZE);
2326 	switch (hw->mac_type) {
2327 	case ICE_MAC_E830:
2328 		gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2329 		bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2330 		break;
2331 	case ICE_MAC_E810:
2332 	default:
2333 		gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2334 		bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2335 	}
2336 	func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
2337 	func_p->fd_fltr_best_effort = bsize;
2338 
2339 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2340 		  func_p->fd_fltr_guar);
2341 	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2342 		  func_p->fd_fltr_best_effort);
2343 }
2344 
2345 /**
2346  * ice_parse_func_caps - Parse function capabilities
2347  * @hw: pointer to the HW struct
2348  * @func_p: pointer to function capabilities structure
2349  * @buf: buffer containing the function capability records
2350  * @cap_count: the number of capabilities
2351  *
2352  * Helper function to parse function (0x000A) capabilities list. For
2353  * capabilities shared between device and function, this relies on
2354  * ice_parse_common_caps.
2355  *
2356  * Loop through the list of provided capabilities and extract the relevant
2357  * data into the function capabilities structure.
2358 */ 2359 static void 2360 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2361 void *buf, u32 cap_count) 2362 { 2363 struct ice_aqc_list_caps_elem *cap_resp; 2364 u32 i; 2365 2366 cap_resp = buf; 2367 2368 memset(func_p, 0, sizeof(*func_p)); 2369 2370 for (i = 0; i < cap_count; i++) { 2371 u16 cap = le16_to_cpu(cap_resp[i].cap); 2372 bool found; 2373 2374 found = ice_parse_common_caps(hw, &func_p->common_cap, 2375 &cap_resp[i], "func caps"); 2376 2377 switch (cap) { 2378 case ICE_AQC_CAPS_VF: 2379 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2380 break; 2381 case ICE_AQC_CAPS_VSI: 2382 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2383 break; 2384 case ICE_AQC_CAPS_1588: 2385 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2386 break; 2387 case ICE_AQC_CAPS_FD: 2388 ice_parse_fdir_func_caps(hw, func_p); 2389 break; 2390 default: 2391 /* Don't list common capabilities as unknown */ 2392 if (!found) 2393 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2394 i, cap); 2395 break; 2396 } 2397 } 2398 2399 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2400 } 2401 2402 /** 2403 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2404 * @hw: pointer to the HW struct 2405 * @dev_p: pointer to device capabilities structure 2406 * @cap: capability element to parse 2407 * 2408 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2409 */ 2410 static void 2411 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2412 struct ice_aqc_list_caps_elem *cap) 2413 { 2414 u32 number = le32_to_cpu(cap->number); 2415 2416 dev_p->num_funcs = hweight32(number); 2417 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2418 dev_p->num_funcs); 2419 } 2420 2421 /** 2422 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2423 * @hw: pointer to the HW struct 2424 * @dev_p: pointer to device capabilities structure 2425 * @cap: capability element to parse 2426 * 2427 * Parse ICE_AQC_CAPS_VF for device capabilities. 2428 */ 2429 static void 2430 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2431 struct ice_aqc_list_caps_elem *cap) 2432 { 2433 u32 number = le32_to_cpu(cap->number); 2434 2435 dev_p->num_vfs_exposed = number; 2436 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2437 dev_p->num_vfs_exposed); 2438 } 2439 2440 /** 2441 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2442 * @hw: pointer to the HW struct 2443 * @dev_p: pointer to device capabilities structure 2444 * @cap: capability element to parse 2445 * 2446 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2447 */ 2448 static void 2449 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2450 struct ice_aqc_list_caps_elem *cap) 2451 { 2452 u32 number = le32_to_cpu(cap->number); 2453 2454 dev_p->num_vsi_allocd_to_host = number; 2455 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2456 dev_p->num_vsi_allocd_to_host); 2457 } 2458 2459 /** 2460 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2461 * @hw: pointer to the HW struct 2462 * @dev_p: pointer to device capabilities structure 2463 * @cap: capability element to parse 2464 * 2465 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
2466 */ 2467 static void 2468 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2469 struct ice_aqc_list_caps_elem *cap) 2470 { 2471 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2472 u32 logical_id = le32_to_cpu(cap->logical_id); 2473 u32 phys_id = le32_to_cpu(cap->phys_id); 2474 u32 number = le32_to_cpu(cap->number); 2475 2476 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2477 dev_p->common_cap.ieee_1588 = info->ena; 2478 2479 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2480 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2481 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2482 2483 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2484 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2485 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2486 2487 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2488 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2489 2490 info->ena_ports = logical_id; 2491 info->tmr_own_map = phys_id; 2492 2493 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2494 dev_p->common_cap.ieee_1588); 2495 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2496 info->tmr0_owner); 2497 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2498 info->tmr0_owned); 2499 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2500 info->tmr0_ena); 2501 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2502 info->tmr1_owner); 2503 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2504 info->tmr1_owned); 2505 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2506 info->tmr1_ena); 2507 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2508 info->ts_ll_read); 2509 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2510 info->ts_ll_int_read); 2511 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2512 info->ena_ports); 2513 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2514 info->tmr_own_map); 2515 } 2516 2517 /** 2518 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2519 * @hw: pointer to the HW struct 2520 * @dev_p: pointer to device capabilities structure 2521 * @cap: capability element to parse 2522 * 2523 * Parse ICE_AQC_CAPS_FD for device capabilities. 2524 */ 2525 static void 2526 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2527 struct ice_aqc_list_caps_elem *cap) 2528 { 2529 u32 number = le32_to_cpu(cap->number); 2530 2531 dev_p->num_flow_director_fltr = number; 2532 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2533 dev_p->num_flow_director_fltr); 2534 } 2535 2536 /** 2537 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2538 * @hw: pointer to the HW struct 2539 * @dev_p: pointer to device capabilities structure 2540 * @cap: capability element to parse 2541 * 2542 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2543 * enabled sensors. 
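 *
 * The capability's "number" word is a bitmap of the supported sensors;
 * it is stored verbatim in dev_p->supported_sensors below.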
2544  */
2545 static void
2546 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2547 			     struct ice_aqc_list_caps_elem *cap)
2548 {
2549 	dev_p->supported_sensors = le32_to_cpu(cap->number);
2550 
2551 	ice_debug(hw, ICE_DBG_INIT,
2552 		  "dev caps: supported sensors (bitmap) = 0x%x\n",
2553 		  dev_p->supported_sensors);
2554 }
2555 
2556 /**
2557  * ice_parse_dev_caps - Parse device capabilities
2558  * @hw: pointer to the HW struct
2559  * @dev_p: pointer to device capabilities structure
2560  * @buf: buffer containing the device capability records
2561  * @cap_count: the number of capabilities
2562  *
2563  * Helper function to parse device (0x000B) capabilities list. For
2564  * capabilities shared between device and function, this relies on
2565  * ice_parse_common_caps.
2566  *
2567  * Loop through the list of provided capabilities and extract the relevant
2568  * data into the device capabilities structure.
2569  */
2570 static void
2571 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2572 		   void *buf, u32 cap_count)
2573 {
2574 	struct ice_aqc_list_caps_elem *cap_resp;
2575 	u32 i;
2576 
2577 	cap_resp = buf;
2578 
2579 	memset(dev_p, 0, sizeof(*dev_p));
2580 
2581 	for (i = 0; i < cap_count; i++) {
2582 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2583 		bool found;
2584 
2585 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2586 					      &cap_resp[i], "dev caps");
2587 
2588 		switch (cap) {
2589 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2590 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2591 			break;
2592 		case ICE_AQC_CAPS_VF:
2593 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2594 			break;
2595 		case ICE_AQC_CAPS_VSI:
2596 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2597 			break;
2598 		case ICE_AQC_CAPS_1588:
2599 			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2600 			break;
2601 		case ICE_AQC_CAPS_FD:
2602 			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2603 			break;
2604 		case ICE_AQC_CAPS_SENSOR_READING:
2605 			ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
2606 			break;
2607 		default:
2608 			/* Don't list common capabilities as unknown */
2609 			if (!found)
2610 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2611 					  i, cap);
2612 			break;
2613 		}
2614 	}
2615 
2616 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2617 }
2618 
2619 /**
2620  * ice_is_pf_c827 - check if PF contains C827 PHY
2621  * @hw: pointer to the hw struct
2622  */
2623 bool ice_is_pf_c827(struct ice_hw *hw)
2624 {
2625 	struct ice_aqc_get_link_topo cmd = {};
2626 	u8 node_part_number;
2627 	u16 node_handle;
2628 	int status;
2629 
2630 	if (hw->mac_type != ICE_MAC_E810)
2631 		return false;
2632 
2633 	if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
2634 		return true;
2635 
2636 	cmd.addr.topo_params.node_type_ctx =
2637 		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
2638 		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
2639 	cmd.addr.topo_params.index = 0;
2640 
2641 	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
2642 					 &node_handle);
2643 
2644 	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
2645 		return false;
2646 
2647 	if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
2648 		return true;
2649 
2650 	return false;
2651 }
2652 
2653 /**
2654  * ice_is_phy_rclk_in_netlist
2655  * @hw: pointer to the hw struct
2656  *
2657  * Check if the PHY Recovered Clock device is present in the netlist
2658  */
2659 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
2660 {
2661 	if
(ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2662 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2663 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2664 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2665 return false; 2666 2667 return true; 2668 } 2669 2670 /** 2671 * ice_is_clock_mux_in_netlist 2672 * @hw: pointer to the hw struct 2673 * 2674 * Check if the Clock Multiplexer device is present in the netlist 2675 */ 2676 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2677 { 2678 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2679 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2680 NULL)) 2681 return false; 2682 2683 return true; 2684 } 2685 2686 /** 2687 * ice_is_cgu_in_netlist - check for CGU presence 2688 * @hw: pointer to the hw struct 2689 * 2690 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2691 * Save the CGU part number in the hw structure for later use. 2692 * Return: 2693 * * true - cgu is present 2694 * * false - cgu is not present 2695 */ 2696 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2697 { 2698 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2699 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2700 NULL)) { 2701 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2702 return true; 2703 } else if (!ice_find_netlist_node(hw, 2704 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2705 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2706 NULL)) { 2707 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2708 return true; 2709 } 2710 2711 return false; 2712 } 2713 2714 /** 2715 * ice_is_gps_in_netlist 2716 * @hw: pointer to the hw struct 2717 * 2718 * Check if the GPS generic device is present in the netlist 2719 */ 2720 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2721 { 2722 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2723 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2724 return false; 2725 2726 return true; 2727 } 2728 2729 /** 2730 * ice_aq_list_caps - query function/device capabilities 2731 * @hw: pointer to the HW struct 2732 * @buf: a buffer to hold the capabilities 2733 * @buf_size: size of the buffer 2734 * @cap_count: if not NULL, set to the number of capabilities reported 2735 * @opc: capabilities type to discover, device or function 2736 * @cd: pointer to command details structure or NULL 2737 * 2738 * Get the function (0x000A) or device (0x000B) capabilities description from 2739 * firmware and store it in the buffer. 2740 * 2741 * If the cap_count pointer is not NULL, then it is set to the number of 2742 * capabilities firmware will report. Note that if the buffer size is too 2743 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2744 * cap_count will still be updated in this case. It is recommended that the 2745 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2746 * firmware could return) to avoid this. 
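 *
 * A minimal usage sketch (the same pattern ice_discover_dev_caps()
 * uses below):
 *
 *	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *	status = ice_aq_list_caps(hw, buf, ICE_AQ_MAX_BUF_LEN, &cap_count,
 *				  ice_aqc_opc_list_dev_caps, NULL);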
2747 */ 2748 int 2749 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2750 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2751 { 2752 struct ice_aqc_list_caps *cmd; 2753 struct ice_aq_desc desc; 2754 int status; 2755 2756 cmd = &desc.params.get_cap; 2757 2758 if (opc != ice_aqc_opc_list_func_caps && 2759 opc != ice_aqc_opc_list_dev_caps) 2760 return -EINVAL; 2761 2762 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2763 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2764 2765 if (cap_count) 2766 *cap_count = le32_to_cpu(cmd->count); 2767 2768 return status; 2769 } 2770 2771 /** 2772 * ice_discover_dev_caps - Read and extract device capabilities 2773 * @hw: pointer to the hardware structure 2774 * @dev_caps: pointer to device capabilities structure 2775 * 2776 * Read the device capabilities and extract them into the dev_caps structure 2777 * for later use. 2778 */ 2779 int 2780 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2781 { 2782 u32 cap_count = 0; 2783 void *cbuf; 2784 int status; 2785 2786 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2787 if (!cbuf) 2788 return -ENOMEM; 2789 2790 /* Although the driver doesn't know the number of capabilities the 2791 * device will return, we can simply send a 4KB buffer, the maximum 2792 * possible size that firmware can return. 2793 */ 2794 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2795 2796 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2797 ice_aqc_opc_list_dev_caps, NULL); 2798 if (!status) 2799 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2800 kfree(cbuf); 2801 2802 return status; 2803 } 2804 2805 /** 2806 * ice_discover_func_caps - Read and extract function capabilities 2807 * @hw: pointer to the hardware structure 2808 * @func_caps: pointer to function capabilities structure 2809 * 2810 * Read the function capabilities and extract them into the func_caps structure 2811 * for later use. 2812 */ 2813 static int 2814 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2815 { 2816 u32 cap_count = 0; 2817 void *cbuf; 2818 int status; 2819 2820 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2821 if (!cbuf) 2822 return -ENOMEM; 2823 2824 /* Although the driver doesn't know the number of capabilities the 2825 * device will return, we can simply send a 4KB buffer, the maximum 2826 * possible size that firmware can return. 
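	 * The worst-case element count is then the buffer size divided by
	 * the per-element size, as computed below.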
2827 */ 2828 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2829 2830 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2831 ice_aqc_opc_list_func_caps, NULL); 2832 if (!status) 2833 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2834 kfree(cbuf); 2835 2836 return status; 2837 } 2838 2839 /** 2840 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2841 * @hw: pointer to the hardware structure 2842 */ 2843 void ice_set_safe_mode_caps(struct ice_hw *hw) 2844 { 2845 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2846 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2847 struct ice_hw_common_caps cached_caps; 2848 u32 num_funcs; 2849 2850 /* cache some func_caps values that should be restored after memset */ 2851 cached_caps = func_caps->common_cap; 2852 2853 /* unset func capabilities */ 2854 memset(func_caps, 0, sizeof(*func_caps)); 2855 2856 #define ICE_RESTORE_FUNC_CAP(name) \ 2857 func_caps->common_cap.name = cached_caps.name 2858 2859 /* restore cached values */ 2860 ICE_RESTORE_FUNC_CAP(valid_functions); 2861 ICE_RESTORE_FUNC_CAP(txq_first_id); 2862 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2863 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2864 ICE_RESTORE_FUNC_CAP(max_mtu); 2865 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2866 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2867 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2868 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2869 2870 /* one Tx and one Rx queue in safe mode */ 2871 func_caps->common_cap.num_rxq = 1; 2872 func_caps->common_cap.num_txq = 1; 2873 2874 /* two MSIX vectors, one for traffic and one for misc causes */ 2875 func_caps->common_cap.num_msix_vectors = 2; 2876 func_caps->guar_num_vsi = 1; 2877 2878 /* cache some dev_caps values that should be restored after memset */ 2879 cached_caps = dev_caps->common_cap; 2880 num_funcs = dev_caps->num_funcs; 2881 2882 /* unset dev capabilities */ 2883 memset(dev_caps, 0, sizeof(*dev_caps)); 2884 2885 #define ICE_RESTORE_DEV_CAP(name) \ 2886 dev_caps->common_cap.name = cached_caps.name 2887 2888 /* restore cached values */ 2889 ICE_RESTORE_DEV_CAP(valid_functions); 2890 ICE_RESTORE_DEV_CAP(txq_first_id); 2891 ICE_RESTORE_DEV_CAP(rxq_first_id); 2892 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2893 ICE_RESTORE_DEV_CAP(max_mtu); 2894 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2895 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2896 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2897 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2898 dev_caps->num_funcs = num_funcs; 2899 2900 /* one Tx and one Rx queue per function in safe mode */ 2901 dev_caps->common_cap.num_rxq = num_funcs; 2902 dev_caps->common_cap.num_txq = num_funcs; 2903 2904 /* two MSIX vectors per function */ 2905 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2906 } 2907 2908 /** 2909 * ice_get_caps - get info about the HW 2910 * @hw: pointer to the hardware structure 2911 */ 2912 int ice_get_caps(struct ice_hw *hw) 2913 { 2914 int status; 2915 2916 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2917 if (status) 2918 return status; 2919 2920 return ice_discover_func_caps(hw, &hw->func_caps); 2921 } 2922 2923 /** 2924 * ice_aq_manage_mac_write - manage MAC address write command 2925 * @hw: pointer to the HW struct 2926 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2927 * @flags: flags to control write behavior 2928 * @cd: pointer to command details structure or NULL 2929 * 2930 * This function is used to write MAC address 
to the NVM (0x0108).
2931  */
2932 int
2933 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2934 			struct ice_sq_cd *cd)
2935 {
2936 	struct ice_aqc_manage_mac_write *cmd;
2937 	struct ice_aq_desc desc;
2938 
2939 	cmd = &desc.params.mac_write;
2940 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2941 
2942 	cmd->flags = flags;
2943 	ether_addr_copy(cmd->mac_addr, mac_addr);
2944 
2945 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2946 }
2947 
2948 /**
2949  * ice_aq_clear_pxe_mode
2950  * @hw: pointer to the HW struct
2951  *
2952  * Tell the firmware that the driver is taking over from PXE (0x0110).
2953  */
2954 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
2955 {
2956 	struct ice_aq_desc desc;
2957 
2958 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2959 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2960 
2961 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2962 }
2963 
2964 /**
2965  * ice_clear_pxe_mode - clear pxe operations mode
2966  * @hw: pointer to the HW struct
2967  *
2968  * Make sure all PXE mode settings are cleared, including things
2969  * like descriptor fetch/write-back mode.
2970  */
2971 void ice_clear_pxe_mode(struct ice_hw *hw)
2972 {
2973 	if (ice_check_sq_alive(hw, &hw->adminq))
2974 		ice_aq_clear_pxe_mode(hw);
2975 }
2976 
2977 /**
2978  * ice_aq_set_port_params - set physical port parameters.
2979  * @pi: pointer to the port info struct
2980  * @double_vlan: if set double VLAN is enabled
2981  * @cd: pointer to command details structure or NULL
2982  *
2983  * Set Physical port parameters (0x0203)
2984  */
2985 int
2986 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
2987 		       struct ice_sq_cd *cd)
2989 {
2990 	struct ice_aqc_set_port_params *cmd;
2991 	struct ice_hw *hw = pi->hw;
2992 	struct ice_aq_desc desc;
2993 	u16 cmd_flags = 0;
2994 
2995 	cmd = &desc.params.set_port_params;
2996 
2997 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2998 	if (double_vlan)
2999 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3000 	cmd->cmd_flags = cpu_to_le16(cmd_flags);
3001 
3002 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3003 }
3004 
3005 /**
3006  * ice_is_100m_speed_supported
3007  * @hw: pointer to the HW struct
3008  *
3009  * returns true if 100M speeds are supported by the device,
3010  * false otherwise.
3011  */
3012 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3013 {
3014 	switch (hw->device_id) {
3015 	case ICE_DEV_ID_E822C_SGMII:
3016 	case ICE_DEV_ID_E822L_SGMII:
3017 	case ICE_DEV_ID_E823L_1GBE:
3018 	case ICE_DEV_ID_E823C_SGMII:
3019 		return true;
3020 	default:
3021 		return false;
3022 	}
3023 }
3024 
3025 /**
3026  * ice_get_link_speed_based_on_phy_type - returns link speed
3027  * @phy_type_low: lower part of phy_type
3028  * @phy_type_high: higher part of phy_type
3029  *
3030  * This helper function will convert an entry in PHY type structure
3031  * [phy_type_low, phy_type_high] to its corresponding link speed.
3032  * Note: In the structure of [phy_type_low, phy_type_high], there should
3033  * be one bit set, as this function will convert one PHY type to its
3034  * speed.
3035 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3036 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3037 */ 3038 static u16 3039 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3040 { 3041 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3042 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3043 3044 switch (phy_type_low) { 3045 case ICE_PHY_TYPE_LOW_100BASE_TX: 3046 case ICE_PHY_TYPE_LOW_100M_SGMII: 3047 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3048 break; 3049 case ICE_PHY_TYPE_LOW_1000BASE_T: 3050 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3051 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3052 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3053 case ICE_PHY_TYPE_LOW_1G_SGMII: 3054 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3055 break; 3056 case ICE_PHY_TYPE_LOW_2500BASE_T: 3057 case ICE_PHY_TYPE_LOW_2500BASE_X: 3058 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3059 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3060 break; 3061 case ICE_PHY_TYPE_LOW_5GBASE_T: 3062 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3063 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3064 break; 3065 case ICE_PHY_TYPE_LOW_10GBASE_T: 3066 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3067 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3068 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3069 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3070 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3071 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3072 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3073 break; 3074 case ICE_PHY_TYPE_LOW_25GBASE_T: 3075 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3076 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3077 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3078 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3079 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3080 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3081 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3082 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3083 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3084 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3085 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3086 break; 3087 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3088 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3089 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3090 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3091 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3092 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3093 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3094 break; 3095 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3096 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3097 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3098 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3099 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3100 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3101 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3102 case ICE_PHY_TYPE_LOW_50G_AUI2: 3103 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3104 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3105 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3106 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3107 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3108 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3109 case ICE_PHY_TYPE_LOW_50G_AUI1: 3110 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3111 break; 3112 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3113 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3114 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3115 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3116 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3117 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3118 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3119 case ICE_PHY_TYPE_LOW_100G_AUI4: 3120 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3121 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3122 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3123 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3124 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3125 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3126 
		break;
3127 	default:
3128 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3129 		break;
3130 	}
3131 
3132 	switch (phy_type_high) {
3133 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3134 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3135 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3136 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3137 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3138 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3139 		break;
3140 	default:
3141 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3142 		break;
3143 	}
3144 
3145 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3146 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3147 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3148 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3149 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3150 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3151 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3152 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3153 		return speed_phy_type_low;
3154 	else
3155 		return speed_phy_type_high;
3156 }
3157 
3158 /**
3159  * ice_update_phy_type
3160  * @phy_type_low: pointer to the lower part of phy_type
3161  * @phy_type_high: pointer to the higher part of phy_type
3162  * @link_speeds_bitmap: targeted link speeds bitmap
3163  *
3164  * Note: the link_speeds_bitmap encoding matches that of
3165  * ice_aqc_get_link_status->link_speed. The caller may pass in a
3166  * link_speeds_bitmap that includes multiple speeds.
3167  *
3168  * Each entry in the [phy_type_low, phy_type_high] structure represents
3169  * a certain link speed. This helper function turns on the bits in
3170  * [phy_type_low, phy_type_high] that correspond to the speeds set in
3171  * the link_speeds_bitmap input parameter.
3172  */
3173 void
3174 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3175 		    u16 link_speeds_bitmap)
3176 {
3177 	u64 pt_high;
3178 	u64 pt_low;
3179 	int index;
3180 	u16 speed;
3181 
3182 	/* We first check with low part of phy_type */
3183 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3184 		pt_low = BIT_ULL(index);
3185 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3186 
3187 		if (link_speeds_bitmap & speed)
3188 			*phy_type_low |= BIT_ULL(index);
3189 	}
3190 
3191 	/* We then check with high part of phy_type */
3192 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3193 		pt_high = BIT_ULL(index);
3194 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3195 
3196 		if (link_speeds_bitmap & speed)
3197 			*phy_type_high |= BIT_ULL(index);
3198 	}
3199 }
3200 
3201 /**
3202  * ice_aq_set_phy_cfg
3203  * @hw: pointer to the HW struct
3204  * @pi: port info structure of the interested logical port
3205  * @cfg: structure with PHY configuration data to be set
3206  * @cd: pointer to command details structure or NULL
3207  *
3208  * Set the various PHY configuration parameters supported on the Port.
3209  * One or more of the Set PHY config parameters may be ignored in an MFP
3210  * mode as the PF may not have the privilege to set some of the PHY Config
3211  * parameters. This status will be indicated by the command response (0x0601).
3212  */
3213 int
3214 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3215 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3216 {
3217 	struct ice_aq_desc desc;
3218 	int status;
3219 
3220 	if (!cfg)
3221 		return -EINVAL;
3222 
3223 	/* Ensure that only valid bits of cfg->caps can be turned on.
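	 * Invalid bits are cleared below (with a debug message) rather than
	 * rejected, so a stale caps word does not fail the AQ command
	 * outright.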
*/ 3224 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3225 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3226 cfg->caps); 3227 3228 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3229 } 3230 3231 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3232 desc.params.set_phy.lport_num = pi->lport; 3233 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3234 3235 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3236 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3237 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3238 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3239 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3240 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3241 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3242 cfg->low_power_ctrl_an); 3243 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3244 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3245 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3246 cfg->link_fec_opt); 3247 3248 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3249 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3250 status = 0; 3251 3252 if (!status) 3253 pi->phy.curr_user_phy_cfg = *cfg; 3254 3255 return status; 3256 } 3257 3258 /** 3259 * ice_update_link_info - update status of the HW network link 3260 * @pi: port info structure of the interested logical port 3261 */ 3262 int ice_update_link_info(struct ice_port_info *pi) 3263 { 3264 struct ice_link_status *li; 3265 int status; 3266 3267 if (!pi) 3268 return -EINVAL; 3269 3270 li = &pi->phy.link_info; 3271 3272 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3273 if (status) 3274 return status; 3275 3276 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3277 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); 3278 3279 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3280 if (!pcaps) 3281 return -ENOMEM; 3282 3283 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3284 pcaps, NULL); 3285 } 3286 3287 return status; 3288 } 3289 3290 /** 3291 * ice_cache_phy_user_req 3292 * @pi: port information structure 3293 * @cache_data: PHY logging data 3294 * @cache_mode: PHY logging mode 3295 * 3296 * Log the user request on (FC, FEC, SPEED) for later use. 
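 *
 * Example (this mirrors ice_cfg_phy_fc() below): caching a flow control
 * request so it can be re-applied after a media change:
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);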
3297 */ 3298 static void 3299 ice_cache_phy_user_req(struct ice_port_info *pi, 3300 struct ice_phy_cache_mode_data cache_data, 3301 enum ice_phy_cache_mode cache_mode) 3302 { 3303 if (!pi) 3304 return; 3305 3306 switch (cache_mode) { 3307 case ICE_FC_MODE: 3308 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3309 break; 3310 case ICE_SPEED_MODE: 3311 pi->phy.curr_user_speed_req = 3312 cache_data.data.curr_user_speed_req; 3313 break; 3314 case ICE_FEC_MODE: 3315 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3316 break; 3317 default: 3318 break; 3319 } 3320 } 3321 3322 /** 3323 * ice_caps_to_fc_mode 3324 * @caps: PHY capabilities 3325 * 3326 * Convert PHY FC capabilities to ice FC mode 3327 */ 3328 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3329 { 3330 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3331 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3332 return ICE_FC_FULL; 3333 3334 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3335 return ICE_FC_TX_PAUSE; 3336 3337 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3338 return ICE_FC_RX_PAUSE; 3339 3340 return ICE_FC_NONE; 3341 } 3342 3343 /** 3344 * ice_caps_to_fec_mode 3345 * @caps: PHY capabilities 3346 * @fec_options: Link FEC options 3347 * 3348 * Convert PHY FEC capabilities to ice FEC mode 3349 */ 3350 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3351 { 3352 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3353 return ICE_FEC_AUTO; 3354 3355 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3356 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3357 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3358 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3359 return ICE_FEC_BASER; 3360 3361 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3362 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3363 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3364 return ICE_FEC_RS; 3365 3366 return ICE_FEC_NONE; 3367 } 3368 3369 /** 3370 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3371 * @pi: port information structure 3372 * @cfg: PHY configuration data to set FC mode 3373 * @req_mode: FC mode to configure 3374 */ 3375 int 3376 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3377 enum ice_fc_mode req_mode) 3378 { 3379 struct ice_phy_cache_mode_data cache_data; 3380 u8 pause_mask = 0x0; 3381 3382 if (!pi || !cfg) 3383 return -EINVAL; 3384 3385 switch (req_mode) { 3386 case ICE_FC_FULL: 3387 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3388 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3389 break; 3390 case ICE_FC_RX_PAUSE: 3391 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3392 break; 3393 case ICE_FC_TX_PAUSE: 3394 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3395 break; 3396 default: 3397 break; 3398 } 3399 3400 /* clear the old pause settings */ 3401 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3402 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3403 3404 /* set the new capabilities */ 3405 cfg->caps |= pause_mask; 3406 3407 /* Cache user FC request */ 3408 cache_data.data.curr_user_fc_req = req_mode; 3409 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3410 3411 return 0; 3412 } 3413 3414 /** 3415 * ice_set_fc 3416 * @pi: port information structure 3417 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3418 * @ena_auto_link_update: enable automatic link update 3419 * 3420 * Set the requested flow control mode. 
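 *
 * On failure, *aq_failures records which step failed:
 * ICE_SET_FC_AQ_FAIL_GET (reading PHY caps), ICE_SET_FC_AQ_FAIL_SET
 * (applying the new config), or ICE_SET_FC_AQ_FAIL_UPDATE (refreshing
 * link info afterwards).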
3421  */
3422 int
3423 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3424 {
3425 	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
3426 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3427 	struct ice_hw *hw;
3428 	int status;
3429 
3430 	if (!pi || !aq_failures)
3431 		return -EINVAL;
3432 
3433 	*aq_failures = 0;
3434 	hw = pi->hw;
3435 
3436 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3437 	if (!pcaps)
3438 		return -ENOMEM;
3439 
3440 	/* Get the current PHY config */
3441 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3442 				     pcaps, NULL);
3443 	if (status) {
3444 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3445 		goto out;
3446 	}
3447 
3448 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3449 
3450 	/* Configure the set PHY data */
3451 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3452 	if (status)
3453 		goto out;
3454 
3455 	/* If the capabilities have changed, then set the new config */
3456 	if (cfg.caps != pcaps->caps) {
3457 		int retry_count, retry_max = 10;
3458 
3459 		/* Auto restart link so settings take effect */
3460 		if (ena_auto_link_update)
3461 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3462 
3463 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3464 		if (status) {
3465 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3466 			goto out;
3467 		}
3468 
3469 		/* Update the link info
3470 		 * It sometimes takes a really long time for link to
3471 		 * come back from the atomic reset. Thus, we wait a
3472 		 * little bit.
3473 		 */
3474 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3475 			status = ice_update_link_info(pi);
3476 
3477 			if (!status)
3478 				break;
3479 
3480 			mdelay(100);
3481 		}
3482 
3483 		if (status)
3484 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3485 	}
3486 
3487 out:
3488 	return status;
3489 }
3490 
3491 /**
3492  * ice_phy_caps_equals_cfg
3493  * @phy_caps: PHY capabilities
3494  * @phy_cfg: PHY configuration
3495  *
3496  * Helper function to determine if PHY capabilities match PHY
3497  * configuration
3498  */
3499 bool
3500 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3501 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3502 {
3503 	u8 caps_mask, cfg_mask;
3504 
3505 	if (!phy_caps || !phy_cfg)
3506 		return false;
3507 
3508 	/* These bits are not common between capabilities and configuration.
3509 	 * Do not use them to determine equality.
3510 	 */
3511 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3512 					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
3513 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3514 
3515 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3516 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3517 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3518 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3519 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3520 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3521 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3522 		return false;
3523 
3524 	return true;
3525 }
3526 
3527 /**
3528  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3529  * @pi: port information structure
3530  * @caps: PHY ability structure to copy data from
3531  * @cfg: PHY configuration structure to copy data to
3532  *
3533  * Helper function to copy AQC PHY get ability data to PHY set configuration
3534  * data structure
3535  */
3536 void
3537 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3538 			 struct ice_aqc_get_phy_caps_data *caps,
3539 			 struct ice_aqc_set_phy_cfg_data *cfg)
3540 {
3541 	if (!pi || !caps || !cfg)
3542 		return;
3543 
3544 	memset(cfg, 0, sizeof(*cfg));
3545 	cfg->phy_type_low = caps->phy_type_low;
3546 	cfg->phy_type_high = caps->phy_type_high;
3547 	cfg->caps = caps->caps;
3548 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3549 	cfg->eee_cap = caps->eee_cap;
3550 	cfg->eeer_value = caps->eeer_value;
3551 	cfg->link_fec_opt = caps->link_fec_options;
3552 	cfg->module_compliance_enforcement =
3553 		caps->module_compliance_enforcement;
3554 }
3555 
3556 /**
3557  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3558  * @pi: port information structure
3559  * @cfg: PHY configuration data to set FEC mode
3560  * @fec: FEC mode to configure
3561  */
3562 int
3563 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3564 		enum ice_fec_mode fec)
3565 {
3566 	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
3567 	struct ice_hw *hw;
3568 	int status;
3569 
3570 	if (!pi || !cfg)
3571 		return -EINVAL;
3572 
3573 	hw = pi->hw;
3574 
3575 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3576 	if (!pcaps)
3577 		return -ENOMEM;
3578 
3579 	status = ice_aq_get_phy_caps(pi, false,
3580 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3581 				      ICE_AQC_REPORT_DFLT_CFG :
3582 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3583 	if (status)
3584 		goto out;
3585 
3586 	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3587 	cfg->link_fec_opt = pcaps->link_fec_options;
3588 
3589 	switch (fec) {
3590 	case ICE_FEC_BASER:
3591 		/* Clear RS bits, and AND BASE-R ability
3592 		 * bits and OR request bits.
3593 		 */
3594 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3595 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3596 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3597 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3598 		break;
3599 	case ICE_FEC_RS:
3600 		/* Clear BASE-R bits, and AND RS ability
3601 		 * bits and OR request bits.
3602 		 */
3603 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3604 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3605 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3606 		break;
3607 	case ICE_FEC_NONE:
3608 		/* Clear all FEC option bits. */
3609 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3610 		break;
3611 	case ICE_FEC_AUTO:
3612 		/* AND auto FEC bit, and all caps bits. */
*/ 3613 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3614 cfg->link_fec_opt |= pcaps->link_fec_options; 3615 break; 3616 default: 3617 status = -EINVAL; 3618 break; 3619 } 3620 3621 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3622 !ice_fw_supports_report_dflt_cfg(hw)) { 3623 struct ice_link_default_override_tlv tlv = { 0 }; 3624 3625 status = ice_get_link_default_override(&tlv, pi); 3626 if (status) 3627 goto out; 3628 3629 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3630 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3631 cfg->link_fec_opt = tlv.fec_options; 3632 } 3633 3634 out: 3635 return status; 3636 } 3637 3638 /** 3639 * ice_get_link_status - get status of the HW network link 3640 * @pi: port information structure 3641 * @link_up: pointer to bool (true/false = linkup/linkdown) 3642 * 3643 * Variable link_up is true if link is up, false if link is down. 3644 * The variable link_up is invalid if status is non zero. As a 3645 * result of this call, link status reporting becomes enabled 3646 */ 3647 int ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3648 { 3649 struct ice_phy_info *phy_info; 3650 int status = 0; 3651 3652 if (!pi || !link_up) 3653 return -EINVAL; 3654 3655 phy_info = &pi->phy; 3656 3657 if (phy_info->get_link_info) { 3658 status = ice_update_link_info(pi); 3659 3660 if (status) 3661 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3662 status); 3663 } 3664 3665 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3666 3667 return status; 3668 } 3669 3670 /** 3671 * ice_aq_set_link_restart_an 3672 * @pi: pointer to the port information structure 3673 * @ena_link: if true: enable link, if false: disable link 3674 * @cd: pointer to command details structure or NULL 3675 * 3676 * Sets up the link and restarts the Auto-Negotiation over the link. 
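
/* Usage sketch (illustrative only): forcing RS-FEC on a port follows the
 * same read-modify-write pattern ice_set_fc() uses above:
 *
 *	struct ice_aqc_get_phy_caps_data caps = {};
 *	struct ice_aqc_set_phy_cfg_data cfg = {};
 *
 *	ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &caps, NULL);
 *	ice_copy_phy_caps_to_cfg(pi, &caps, &cfg);
 *	ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */
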
/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *                output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
 *               when PF owns more than 1 port it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}
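
/* Usage sketch (illustrative only; ICE_AQC_PORT_OPT_MAX is assumed from
 * ice_adminq_cmd.h): a caller sizes the buffer for the maximum option count
 * and lets FW report how many entries are valid:
 *
 *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_MAX];
 *	u8 cnt = ICE_AQC_PORT_OPT_MAX, active, pending;
 *	bool active_valid, pending_valid;
 *
 *	err = ice_aq_get_port_options(hw, opts, &cnt, 0, false, &active,
 *				      &active_valid, &pending, &pending_valid);
 */
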
/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
 *               when PF owns more than 1 port it must be true
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, upper 8 bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 for read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	u16 i2c_bus_addr;
	int status;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
	if (write)
		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
{
	switch (type) {
	case ICE_LUT_VSI:
		return ICE_LUT_VSI_SIZE;
	case ICE_LUT_GLOBAL:
		return ICE_LUT_GLOBAL_SIZE;
	case ICE_LUT_PF:
		return ICE_LUT_PF_SIZE;
	}
	WARN_ONCE(1, "incorrect type passed");
	return ICE_LUT_VSI_SIZE;
}

static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
{
	switch (size) {
	case ICE_LUT_VSI_SIZE:
		return ICE_AQC_LUT_SIZE_SMALL;
	case ICE_LUT_GLOBAL_SIZE:
		return ICE_AQC_LUT_SIZE_512;
	case ICE_LUT_PF_SIZE:
		return ICE_AQC_LUT_SIZE_2K;
	}
	WARN_ONCE(1, "incorrect size passed");
	return 0;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw,
			 struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
	enum ice_lut_type lut_type = params->lut_type;
	struct ice_aqc_get_set_rss_lut *desc_params;
	enum ice_aqc_lut_flags flags;
	enum ice_lut_size lut_size;
	struct ice_aq_desc desc;
	u8 *lut = params->lut;

	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	lut_size = ice_lut_type_to_size(lut_type);
	if (lut_size > params->lut_size)
		return -EINVAL;
	else if (set && lut_size != params->lut_size)
		return -EINVAL;

	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	if (set)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	desc_params = &desc.params.get_set_rss_lut;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	if (lut_type == ICE_LUT_GLOBAL)
		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
					  params->global_lut_id);

	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
	desc_params->flags = cpu_to_le16(flags);

	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *desc_params;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	desc_params = &desc.params.get_set_rss_key;
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}
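
/* Usage sketch (illustrative only): filling a VSI-sized LUT round-robin over
 * four queues and programming it through the params struct:
 *
 *	u8 lut[ICE_LUT_VSI_SIZE];
 *	struct ice_aq_get_set_rss_lut_params p = {
 *		.vsi_handle = vsi_handle,
 *		.lut_type = ICE_LUT_VSI,
 *		.lut_size = ICE_LUT_VSI_SIZE,
 *		.lut = lut,
 *	};
 *
 *	for (i = 0; i < ICE_LUT_VSI_SIZE; i++)
 *		lut[i] = i % 4;
 *	err = ice_aq_set_rss_lut(hw, &p);
 */
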
/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 vmvf_and_timeout;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
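
/* Buffer-size sketch (illustrative only, group names hypothetical): both
 * wrappers above walk back-to-back flexible-array groups, so a two-group add
 * buffer carrying 1 and 2 queues would be sized as:
 *
 *	buf_size = struct_size(grp0, txqs, 1) + struct_size(grp1, txqs, 2);
 *
 * Any mismatch with that sum (including the 2-byte padding that
 * ice_aq_dis_lan_txq() adds for an even num_qs) yields -EINVAL before the
 * command is posted.
 */
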
/**
 * ice_aq_cfg_lan_txq
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * Move/Configure LAN Tx queue (0x0C32)
 *
 * There is a better AQ command to use for moving nodes, so only coding
 * this one for configuring the node.
 */
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_txqs *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.cfg_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (!buf)
		return -EINVAL;

	cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
	cmd->num_qs = num_qs;
	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
	cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
	cmd->blocked_cgds = 0;

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
			  hw->adminq.sq_last_status);
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_pack_ctx_byte - write a byte to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
			      const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);

	src_byte = *from;
	src_byte <<= shift_width;
	src_byte &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_pack_ctx_word - write a word to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
			      const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word <<= shift_width;
	src_word &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_pack_ctx_dword - write a dword to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
			       const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword <<= shift_width;
	src_dword &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}
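
/* Worked example (illustrative only): packing a 3-bit field whose lsb is 13
 * lands in the byte at offset 13 / 8 = 1 with shift_width = 13 % 8 = 5, so
 * the word variant uses mask = GENMASK(7, 5) within that 16-bit window; a
 * source value of 0b101 becomes 0b101 << 5 before being merged into dest.
 */
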
/**
 * ice_pack_ctx_qword - write a qword to a packed context structure
 * @src_ctx: unpacked source context structure
 * @dest_ctx: packed destination context data
 * @ce_info: context element description
 */
static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
			       const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword <<= shift_width;
	src_qword &= mask;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: List of Rx context elements
 */
int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
		const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
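
/* Flow sketch (illustrative only; txq_id is a hypothetical caller variable):
 * a single-queue enable builds one queue group, lets the function above fill
 * in the parent TEID and rate-limit defaults, and then reads back the TEID
 * that FW assigned to the new leaf:
 *
 *	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg, txqs, 1);
 *
 *	qg->num_txqs = 1;
 *	qg->txqs[0].txq_id = cpu_to_le16(txq_id);
 *	err = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg,
 *			      __struct_size(qg), NULL);
 */
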
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if queues are already disabled but the disable queue command
		 * still has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
		q_ctx->q_teid = ICE_INVAL_TEID;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qset
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}
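
/* Usage sketch (illustrative only; qset_id is a hypothetical caller
 * variable): the RDMA auxiliary path typically reserves per-TC Qset counts
 * first and then enables individual Qsets:
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { [0] = 1 };
 *	u16 qset = qset_id;
 *	u32 teid;
 *
 *	err = ice_cfg_vsi_rdma(pi, vsi_handle, vsi_tc_bitmap, max_rdmaqs);
 *	if (!err)
 *		err = ice_ena_vsi_rdma_qset(pi, vsi_handle, 0, &qset, 1, &teid);
 */
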
/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}
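
/* Usage sketch (illustrative only): every output pointer in
 * ice_aq_get_input_pin_cfg() is optional, so a caller interested only in the
 * pin frequency can pass NULL for everything else:
 *
 *	u32 freq;
 *
 *	err = ice_aq_get_input_pin_cfg(hw, pin_idx, NULL, NULL, NULL, NULL,
 *				       &freq, NULL);
 */
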
/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: current DPLL config
 * @dpll_state: current DPLL state
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = sign_extend64(*phase_offset, 47);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}
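
/* Worked example (illustrative only): ice_aq_get_cgu_dpll_status() above
 * rebuilds the 48-bit signed phase offset from two 32-bit words. With
 * phase_offset_h = 0x0000FFFF and phase_offset_l = 0xFFFFFFF6 the merged
 * value is 0xFFFFFFFFFFF6, which sign-extends from bit 47 to -10.
 */
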
/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: PHY node handle
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 *
 * Get sensor reading (0x0632)
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there are any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list, which
	 * is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
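
/* Worked example (illustrative only): for the 40-bit counter above, reading
 * new_data = 0x10 after prev_stat = 0xFFFFFFFFF8 is treated as a rollover:
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF8 = 0x18, i.e. 8 counts before the wrap
 * plus 16 after it.
 */
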

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] - data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *			    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
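
/* Example (editorial sketch): the @params byte packs the transfer
 * attributes that ice_aq_read_i2c()/ice_aq_write_i2c() decode with
 * ICE_AQC_I2C_DATA_SIZE_M. A hypothetical repeated-start read of
 * @data_size bytes could build it like this; the bit layout follows the
 * kernel-doc above, and the repeated-start bit is written out as BIT(7)
 * directly rather than through a named mask.
 */
static inline u8 example_i2c_read_params(u8 data_size)
{
	/* bits [3:0]: number of bytes to read, bit [7]: repeated start */
	return FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, data_size) | BIT(7);
}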

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least the given maj.min.patch
 * version.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
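
/* Example (editorial): ice_is_fw_api_min_ver() implements a standard
 * lexicographic version compare. For a required API of 1.7.0:
 *
 *	hw API 1.7.0 -> true  (equal)
 *	hw API 1.8.0 -> true  (newer minor)
 *	hw API 2.0.0 -> true  (newer major)
 *	hw API 1.6.9 -> false (older minor; patch is irrelevant)
 *
 * Feature checks such as ice_fw_supports_link_override() above are thin
 * wrappers that pin the three components to per-feature constants.
 */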

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
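
/* Example (editorial): the PHY type accumulation above rebuilds a 64-bit
 * value from four consecutive 16-bit shadow RAM words, least significant
 * word first. With words w[0..3], the result is
 *
 *	w[0] | (u64)w[1] << 16 | (u64)w[2] << 32 | (u64)w[3] << 48
 *
 * so words read as { 0x1234, 0x0000, 0x0000, 0xABCD } combine to
 * 0xABCD000000001234. A standalone version of the loop:
 */
static inline u64 example_words_to_u64(const u16 words[4])
{
	u64 val = 0;
	int i;

	for (i = 0; i < 4; i++)
		val |= (u64)words[i] << (i * 16);
	return val;
}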

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add the filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
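
/* Example (editorial sketch): the LLDP filter-control opcode is only
 * honored by E810 firmware new enough to pass
 * ice_fw_supports_lldp_fltr_ctrl(), so callers gate on it first; the
 * vsi_num argument below is a hypothetical absolute HW VSI index.
 */
static inline int example_add_lldp_fltr(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}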

/* Each index into the following array matches the bit position of a speed
 * in the set of AQ-returned link speeds, starting with
 * ICE_AQ_LINK_SPEED_10MB at BIT(0). The firmware reports link_speed as a
 * 16 bit value and the array is indexed by [fls(speed) - 1], so
 * ICE_AQ_LINK_SPEED_UNKNOWN (BIT(15)) and any other bit above the highest
 * defined speed fall outside the array; ice_get_link_speed() returns 0
 * for such indexes.
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
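
/* Example (editorial): converting an AQ-reported link_speed bit to Mb/s.
 * For ICE_AQ_LINK_SPEED_25GB (BIT(7)), fls() returns 8, so index 7 is
 * looked up and SPEED_25000 comes back; an unknown or reserved bit such
 * as BIT(15) indexes past the array and yields 0.
 */
static inline u32 example_aq_speed_to_mbps(u16 aq_link_speed)
{
	/* fls() returns 0 for a zero argument; treat "no speed bit set"
	 * as unknown explicitly rather than relying on the index wrapping.
	 */
	if (!aq_link_speed)
		return 0;

	return ice_get_link_speed(fls(aq_link_speed) - 1);
}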