// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
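
/* Illustrative note (not part of the driver): each set bit in phy_type_low
 * or phy_type_high selects one entry of the string tables above. For
 * example, a phy_type_low of 0x5000 has bits 12 and 14 set, so
 * ice_dump_phy_type() called with prefix "example" would log:
 *
 *	example: bit(12): 10GBASE_T
 *	example: bit(14): 10GBASE_SR
 */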

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830_BACKPLANE:
	case ICE_DEV_ID_E830_QSFP56:
	case ICE_DEV_ID_E830_SFP:
	case ICE_DEV_ID_E830_SFP_DD:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}
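
/* Illustrative sketch (not part of the driver): callers generally branch on
 * the MAC-family helpers above instead of matching raw device IDs, e.g.
 *
 *	if (ice_is_e810(hw)) {
 *		...E810-specific handling...
 *	} else if (ice_is_generic_mac(hw)) {
 *		...E822/E823/E825 path, which supports the sideband queue...
 *	}
 */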

/**
 * ice_is_e823 - check if the device is E823 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
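
/* Illustrative sketch (not part of the driver): ice_clear_pf_cfg() above is
 * the minimal pattern for a "direct" (buffer-less) AdminQ command; with a
 * hypothetical opcode ice_aqc_opc_example it would read:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_example);
 *	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */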

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
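
/* Illustrative sketch (not part of the driver): since a port can report both
 * a LAN and a WoL address, callers size the response buffer for two entries,
 * mirroring what ice_init_hw() does further below:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *				 sizeof(struct ice_aqc_manage_mac_read_resp),
 *				 GFP_KERNEL);
 *
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 */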

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node - get a node handle from the netlist
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}
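
/* Illustrative sketch (not part of the driver): ice_aq_get_phy_caps() is an
 * indirect command, so the caller owns the response buffer; ice_init_hw()
 * below uses exactly this pattern:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps)
 *		status = ice_aq_get_phy_caps(hw->port_info, false,
 *					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *					     pcaps, NULL);
 */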

/**
 * ice_find_netlist_node - find a node handle in the netlist
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}

/**
 * ice_is_media_cage_present - check if a media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
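
/* Illustrative sketch (not part of the driver): looking up a netlist node by
 * type and part number with ice_find_netlist_node() above. The node type is
 * real; 0x42 is a made-up part number used only for this example:
 *
 *	u16 handle;
 *	int err;
 *
 *	err = ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
 *				    0x42, &handle);
 *	if (!err)
 *		...node found, handle is valid...
 */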

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
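
/* Illustrative sketch (not part of the driver): passing ena_lse = true asks
 * firmware to report subsequent link changes as Link Status Events:
 *
 *	status = ice_aq_get_link_info(pi, true, NULL, NULL);
 *
 * whereas ice_init_hw() below passes false and simply snapshots the current
 * link state.
 */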

/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg - set MAC configuration
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
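
/* Illustrative note (not part of the driver): ice_init_hw() below enables
 * jumbo frame support at the MAC level with exactly this call:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */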

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularity
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1 sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
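
/* Worked example (illustrative): in ice_check_reset() above, the GRSTDEL
 * field of GLGEN_RSTCTL counts 100 ms units and 10 extra iterations (~1 s)
 * are added for outstanding AQ commands. A GRSTDEL reading of 35 therefore
 * yields grst_timeout = 45 polls of mdelay(100), i.e. about 4.5 seconds.
 */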

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw - copy rxq context to HW registers
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx - write Rx queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space, enabling the hardware to prefetch descriptors
 * instead of only fetching them on demand.
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
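
/* Illustrative sketch (not part of the driver): a caller fills the sparse
 * context and lets ice_write_rxq_ctx() pack and program it. The field values
 * below are placeholders; ring_dma, ring_count and rx_buf_len are assumed to
 * come from the caller's ring setup:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		(base is in 128-byte units)
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	(likewise in 128-byte units)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */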

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
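
/* Illustrative sketch (not part of the driver): a sideband register read
 * leaves opcode zero, since ice_sbq_rw_reg() above returns read data from
 * the completion. dest_dev and the address halves are placeholders here:
 *
 *	struct ice_sbq_msg_input msg = { 0 };
 *
 *	msg.dest_dev = dest;		(a device ID from the sideband header)
 *	msg.msg_addr_low = addr_lo;
 *	msg.msg_addr_high = addr_hi;
 *	msg.opcode = 0;			(zero selects a read in this helper)
 *	status = ice_sbq_rw_reg(hw, &msg);
 *	if (!status)
 *		val = msg.data;
 */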

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd - check if the ATQ send should be retried
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, up to ICE_SQ_SEND_MAX_EXECUTE
 * times, if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe,
	 * Get Recipes to Profile Association, and Release Resource (with
	 * resource ID set to Global Config Lock) AdminQ commands are allowed;
	 * all others must block until the package download completes and the
	 * Global Config Lock is released.
	 * See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver - get the firmware version
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver - send the driver version to firmware
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown - tell firmware we are shutting down the AdminQ
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res - request a common resource
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0          - acquired lock, and can perform download package
 * 2) -EIO       - did not get lock, driver should fail to load
 * 3) -EALREADY  - did not get lock, but another driver has
 *                 successfully downloaded the package; the driver does
 *                 not have to download the package and can continue
 *                 loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
1796 */ 1797 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1798 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1799 *timeout = le32_to_cpu(cmd_resp->timeout); 1800 return 0; 1801 } else if (le16_to_cpu(cmd_resp->status) == 1802 ICE_AQ_RES_GLBL_IN_PROG) { 1803 *timeout = le32_to_cpu(cmd_resp->timeout); 1804 return -EIO; 1805 } else if (le16_to_cpu(cmd_resp->status) == 1806 ICE_AQ_RES_GLBL_DONE) { 1807 return -EALREADY; 1808 } 1809 1810 /* invalid FW response, force a timeout immediately */ 1811 *timeout = 0; 1812 return -EIO; 1813 } 1814 1815 /* If the resource is held by some other driver, the command completes 1816 * with a busy return value and the timeout field indicates the maximum 1817 * time the current owner of the resource has to free it. 1818 */ 1819 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1820 *timeout = le32_to_cpu(cmd_resp->timeout); 1821 1822 return status; 1823 } 1824 1825 /** 1826 * ice_aq_release_res 1827 * @hw: pointer to the HW struct 1828 * @res: resource ID 1829 * @sdp_number: resource number 1830 * @cd: pointer to command details structure or NULL 1831 * 1832 * Release a common resource using the admin queue command (0x0009) 1833 */ 1834 static int 1835 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1836 struct ice_sq_cd *cd) 1837 { 1838 struct ice_aqc_req_res *cmd; 1839 struct ice_aq_desc desc; 1840 1841 cmd = &desc.params.res_owner; 1842 1843 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1844 1845 cmd->res_id = cpu_to_le16(res); 1846 cmd->res_number = cpu_to_le32(sdp_number); 1847 1848 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1849 } 1850 1851 /** 1852 * ice_acquire_res 1853 * @hw: pointer to the HW structure 1854 * @res: resource ID 1855 * @access: access type (read or write) 1856 * @timeout: timeout in milliseconds 1857 * 1858 * This function will attempt to acquire the ownership of a resource. 1859 */ 1860 int 1861 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1862 enum ice_aq_res_access_type access, u32 timeout) 1863 { 1864 #define ICE_RES_POLLING_DELAY_MS 10 1865 u32 delay = ICE_RES_POLLING_DELAY_MS; 1866 u32 time_left = timeout; 1867 int status; 1868 1869 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1870 1871 /* A return code of -EALREADY means that another driver has 1872 * previously acquired the resource and performed any necessary updates; 1873 * in this case the caller does not obtain the resource and has no 1874 * further work to do. 1875 */ 1876 if (status == -EALREADY) 1877 goto ice_acquire_res_exit; 1878 1879 if (status) 1880 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 1881 1882 /* If necessary, poll until the current lock owner times out */ 1883 timeout = time_left; 1884 while (status && timeout && time_left) { 1885 mdelay(delay); 1886 timeout = (timeout > delay) ?
timeout - delay : 0; 1887 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1888 1889 if (status == -EALREADY) 1890 /* lock free, but no work to do */ 1891 break; 1892 1893 if (!status) 1894 /* lock acquired */ 1895 break; 1896 } 1897 if (status && status != -EALREADY) 1898 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1899 1900 ice_acquire_res_exit: 1901 if (status == -EALREADY) { 1902 if (access == ICE_RES_WRITE) 1903 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1904 else 1905 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1906 } 1907 return status; 1908 } 1909 1910 /** 1911 * ice_release_res 1912 * @hw: pointer to the HW structure 1913 * @res: resource ID 1914 * 1915 * This function will release a resource using the proper Admin Command. 1916 */ 1917 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1918 { 1919 unsigned long timeout; 1920 int status; 1921 1922 /* there are some rare cases when trying to release the resource 1923 * results in an admin queue timeout, so handle them correctly 1924 */ 1925 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1926 do { 1927 status = ice_aq_release_res(hw, res, 0, NULL); 1928 if (status != -EIO) 1929 break; 1930 usleep_range(1000, 2000); 1931 } while (time_before(jiffies, timeout)); 1932 } 1933 1934 /** 1935 * ice_aq_alloc_free_res - command to allocate/free resources 1936 * @hw: pointer to the HW struct 1937 * @buf: Indirect buffer to hold data parameters and response 1938 * @buf_size: size of buffer for indirect commands 1939 * @opc: pass in the command opcode 1940 * 1941 * Helper function to allocate/free resources using the admin queue commands 1942 */ 1943 int ice_aq_alloc_free_res(struct ice_hw *hw, 1944 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1945 enum ice_adminq_opc opc) 1946 { 1947 struct ice_aqc_alloc_free_res_cmd *cmd; 1948 struct ice_aq_desc desc; 1949 1950 cmd = &desc.params.sw_res_ctrl; 1951 1952 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1953 return -EINVAL; 1954 1955 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1956 1957 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1958 1959 cmd->num_entries = cpu_to_le16(1); 1960 1961 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1962 } 1963 1964 /** 1965 * ice_alloc_hw_res - allocate resource 1966 * @hw: pointer to the HW struct 1967 * @type: type of resource 1968 * @num: number of resources to allocate 1969 * @btm: allocate from bottom 1970 * @res: pointer to array that will receive the resources 1971 */ 1972 int 1973 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 1974 { 1975 struct ice_aqc_alloc_free_res_elem *buf; 1976 u16 buf_len; 1977 int status; 1978 1979 buf_len = struct_size(buf, elem, num); 1980 buf = kzalloc(buf_len, GFP_KERNEL); 1981 if (!buf) 1982 return -ENOMEM; 1983 1984 /* Prepare buffer to allocate resource. 
*/ 1985 buf->num_elems = cpu_to_le16(num); 1986 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1987 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1988 if (btm) 1989 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1990 1991 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 1992 if (status) 1993 goto ice_alloc_res_exit; 1994 1995 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 1996 1997 ice_alloc_res_exit: 1998 kfree(buf); 1999 return status; 2000 } 2001 2002 /** 2003 * ice_free_hw_res - free allocated HW resource 2004 * @hw: pointer to the HW struct 2005 * @type: type of resource to free 2006 * @num: number of resources 2007 * @res: pointer to array that contains the resources to free 2008 */ 2009 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2010 { 2011 struct ice_aqc_alloc_free_res_elem *buf; 2012 u16 buf_len; 2013 int status; 2014 2015 buf_len = struct_size(buf, elem, num); 2016 buf = kzalloc(buf_len, GFP_KERNEL); 2017 if (!buf) 2018 return -ENOMEM; 2019 2020 /* Prepare buffer to free resource. */ 2021 buf->num_elems = cpu_to_le16(num); 2022 buf->res_type = cpu_to_le16(type); 2023 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2024 2025 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2026 if (status) 2027 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2028 2029 kfree(buf); 2030 return status; 2031 } 2032 2033 /** 2034 * ice_get_num_per_func - determine number of resources per PF 2035 * @hw: pointer to the HW structure 2036 * @max: value to be evenly split between each PF 2037 * 2038 * Determine the number of valid functions by going through the bitmap returned 2039 * from parsing capabilities and use this to calculate the number of resources 2040 * per PF based on the max value passed in. 2041 */ 2042 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2043 { 2044 u8 funcs; 2045 2046 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2047 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2048 ICE_CAPS_VALID_FUNCS_M); 2049 2050 if (!funcs) 2051 return 0; 2052 2053 return max / funcs; 2054 } 2055 2056 /** 2057 * ice_parse_common_caps - parse common device/function capabilities 2058 * @hw: pointer to the HW struct 2059 * @caps: pointer to common capabilities structure 2060 * @elem: the capability element to parse 2061 * @prefix: message prefix for tracing capabilities 2062 * 2063 * Given a capability element, extract relevant details into the common 2064 * capability structure. 2065 * 2066 * Returns: true if the capability matches one of the common capability ids, 2067 * false otherwise. 
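 *
 * As a worked example (values hypothetical): an element with cap =
 * ICE_AQC_CAPS_RSS, number = 512 and logical_id = 9 would be parsed as
 * caps->rss_table_size = 512 and caps->rss_table_entry_width = 9, and the
 * function would return true.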
2068 */ 2069 static bool 2070 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2071 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2072 { 2073 u32 logical_id = le32_to_cpu(elem->logical_id); 2074 u32 phys_id = le32_to_cpu(elem->phys_id); 2075 u32 number = le32_to_cpu(elem->number); 2076 u16 cap = le16_to_cpu(elem->cap); 2077 bool found = true; 2078 2079 switch (cap) { 2080 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2081 caps->valid_functions = number; 2082 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2083 caps->valid_functions); 2084 break; 2085 case ICE_AQC_CAPS_SRIOV: 2086 caps->sr_iov_1_1 = (number == 1); 2087 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2088 caps->sr_iov_1_1); 2089 break; 2090 case ICE_AQC_CAPS_DCB: 2091 caps->dcb = (number == 1); 2092 caps->active_tc_bitmap = logical_id; 2093 caps->maxtc = phys_id; 2094 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2095 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2096 caps->active_tc_bitmap); 2097 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2098 break; 2099 case ICE_AQC_CAPS_RSS: 2100 caps->rss_table_size = number; 2101 caps->rss_table_entry_width = logical_id; 2102 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2103 caps->rss_table_size); 2104 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2105 caps->rss_table_entry_width); 2106 break; 2107 case ICE_AQC_CAPS_RXQS: 2108 caps->num_rxq = number; 2109 caps->rxq_first_id = phys_id; 2110 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2111 caps->num_rxq); 2112 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2113 caps->rxq_first_id); 2114 break; 2115 case ICE_AQC_CAPS_TXQS: 2116 caps->num_txq = number; 2117 caps->txq_first_id = phys_id; 2118 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2119 caps->num_txq); 2120 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2121 caps->txq_first_id); 2122 break; 2123 case ICE_AQC_CAPS_MSIX: 2124 caps->num_msix_vectors = number; 2125 caps->msix_vector_first_id = phys_id; 2126 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2127 caps->num_msix_vectors); 2128 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2129 caps->msix_vector_first_id); 2130 break; 2131 case ICE_AQC_CAPS_PENDING_NVM_VER: 2132 caps->nvm_update_pending_nvm = true; 2133 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2134 break; 2135 case ICE_AQC_CAPS_PENDING_OROM_VER: 2136 caps->nvm_update_pending_orom = true; 2137 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2138 break; 2139 case ICE_AQC_CAPS_PENDING_NET_VER: 2140 caps->nvm_update_pending_netlist = true; 2141 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2142 break; 2143 case ICE_AQC_CAPS_NVM_MGMT: 2144 caps->nvm_unified_update = 2145 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2146 true : false; 2147 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2148 caps->nvm_unified_update); 2149 break; 2150 case ICE_AQC_CAPS_RDMA: 2151 caps->rdma = (number == 1); 2152 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2153 break; 2154 case ICE_AQC_CAPS_MAX_MTU: 2155 caps->max_mtu = number; 2156 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2157 prefix, caps->max_mtu); 2158 break; 2159 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2160 caps->pcie_reset_avoidance = (number > 0); 2161 ice_debug(hw, ICE_DBG_INIT, 2162 "%s: pcie_reset_avoidance = %d\n", prefix, 2163 caps->pcie_reset_avoidance); 2164 break; 2165 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2166 caps->reset_restrict_support = (number == 1); 2167 ice_debug(hw, ICE_DBG_INIT, 2168 "%s: reset_restrict_support = %d\n", prefix, 2169 caps->reset_restrict_support); 2170 break; 2171 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2172 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2173 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2174 prefix, caps->roce_lag); 2175 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2176 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2177 prefix, caps->sriov_lag); 2178 break; 2179 default: 2180 /* Not one of the recognized common capabilities */ 2181 found = false; 2182 } 2183 2184 return found; 2185 } 2186 2187 /** 2188 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2189 * @hw: pointer to the HW structure 2190 * @caps: pointer to capabilities structure to fix 2191 * 2192 * Re-calculate the capabilities that are dependent on the number of physical 2193 * ports; i.e. some features are not supported or function differently on 2194 * devices with more than 4 ports. 2195 */ 2196 static void 2197 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2198 { 2199 /* This assumes device capabilities are always scanned before function 2200 * capabilities during the initialization flow. 2201 */ 2202 if (hw->dev_caps.num_funcs > 4) { 2203 /* Max 4 TCs per port */ 2204 caps->maxtc = 4; 2205 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2206 caps->maxtc); 2207 if (caps->rdma) { 2208 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2209 caps->rdma = 0; 2210 } 2211 2212 /* print message only when processing device capabilities 2213 * during initialization. 2214 */ 2215 if (caps == &hw->dev_caps.common_cap) 2216 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2217 } 2218 } 2219 2220 /** 2221 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2222 * @hw: pointer to the HW struct 2223 * @func_p: pointer to function capabilities structure 2224 * @cap: pointer to the capability element to parse 2225 * 2226 * Extract function capabilities for ICE_AQC_CAPS_VF. 
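 *
 * The element's number field carries the count of VFs allocated to this
 * function and logical_id carries the first VF's absolute ID; e.g. a
 * (hypothetical) element with number = 8 and logical_id = 16 would
 * describe VFs 16-23.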
2227 */ 2228 static void 2229 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2230 struct ice_aqc_list_caps_elem *cap) 2231 { 2232 u32 logical_id = le32_to_cpu(cap->logical_id); 2233 u32 number = le32_to_cpu(cap->number); 2234 2235 func_p->num_allocd_vfs = number; 2236 func_p->vf_base_id = logical_id; 2237 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2238 func_p->num_allocd_vfs); 2239 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2240 func_p->vf_base_id); 2241 } 2242 2243 /** 2244 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2245 * @hw: pointer to the HW struct 2246 * @func_p: pointer to function capabilities structure 2247 * @cap: pointer to the capability element to parse 2248 * 2249 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2250 */ 2251 static void 2252 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2253 struct ice_aqc_list_caps_elem *cap) 2254 { 2255 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2256 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2257 le32_to_cpu(cap->number)); 2258 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2259 func_p->guar_num_vsi); 2260 } 2261 2262 /** 2263 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2264 * @hw: pointer to the HW struct 2265 * @func_p: pointer to function capabilities structure 2266 * @cap: pointer to the capability element to parse 2267 * 2268 * Extract function capabilities for ICE_AQC_CAPS_1588. 2269 */ 2270 static void 2271 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2272 struct ice_aqc_list_caps_elem *cap) 2273 { 2274 struct ice_ts_func_info *info = &func_p->ts_func_info; 2275 u32 number = le32_to_cpu(cap->number); 2276 2277 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2278 func_p->common_cap.ieee_1588 = info->ena; 2279 2280 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2281 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2282 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2283 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2284 2285 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2286 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2287 2288 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2289 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2290 } else { 2291 /* Unknown clock frequency, so assume a (probably incorrect) 2292 * default to avoid out-of-bounds look ups of frequency 2293 * related information. 
2294 */ 2295 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2296 info->clk_freq); 2297 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2298 } 2299 2300 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2301 func_p->common_cap.ieee_1588); 2302 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2303 info->src_tmr_owned); 2304 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2305 info->tmr_ena); 2306 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2307 info->tmr_index_owned); 2308 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2309 info->tmr_index_assoc); 2310 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2311 info->clk_freq); 2312 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2313 info->clk_src); 2314 } 2315 2316 /** 2317 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2318 * @hw: pointer to the HW struct 2319 * @func_p: pointer to function capabilities structure 2320 * 2321 * Extract function capabilities for ICE_AQC_CAPS_FD. 2322 */ 2323 static void 2324 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2325 { 2326 u32 reg_val, gsize, bsize; 2327 2328 reg_val = rd32(hw, GLQF_FD_SIZE); 2329 switch (hw->mac_type) { 2330 case ICE_MAC_E830: 2331 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2332 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2333 break; 2334 case ICE_MAC_E810: 2335 default: 2336 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2337 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2338 } 2339 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2340 func_p->fd_fltr_best_effort = bsize; 2341 2342 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2343 func_p->fd_fltr_guar); 2344 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2345 func_p->fd_fltr_best_effort); 2346 } 2347 2348 /** 2349 * ice_parse_func_caps - Parse function capabilities 2350 * @hw: pointer to the HW struct 2351 * @func_p: pointer to function capabilities structure 2352 * @buf: buffer containing the function capability records 2353 * @cap_count: the number of capabilities 2354 * 2355 * Helper function to parse the function (0x000A) capabilities list. For 2356 * capabilities shared between device and function, this relies on 2357 * ice_parse_common_caps. 2358 * 2359 * Loop through the list of provided capabilities and extract the relevant 2360 * data into the function capabilities structure.
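 *
 * A sketch of the intended call pattern (this mirrors
 * ice_discover_func_caps below; the buffer handling shown is
 * illustrative):
 *
 *	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
 *				  ice_aqc_opc_list_func_caps, NULL);
 *	if (!status)
 *		ice_parse_func_caps(hw, func_p, cbuf, cap_count);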
2361 */ 2362 static void 2363 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2364 void *buf, u32 cap_count) 2365 { 2366 struct ice_aqc_list_caps_elem *cap_resp; 2367 u32 i; 2368 2369 cap_resp = buf; 2370 2371 memset(func_p, 0, sizeof(*func_p)); 2372 2373 for (i = 0; i < cap_count; i++) { 2374 u16 cap = le16_to_cpu(cap_resp[i].cap); 2375 bool found; 2376 2377 found = ice_parse_common_caps(hw, &func_p->common_cap, 2378 &cap_resp[i], "func caps"); 2379 2380 switch (cap) { 2381 case ICE_AQC_CAPS_VF: 2382 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2383 break; 2384 case ICE_AQC_CAPS_VSI: 2385 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2386 break; 2387 case ICE_AQC_CAPS_1588: 2388 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2389 break; 2390 case ICE_AQC_CAPS_FD: 2391 ice_parse_fdir_func_caps(hw, func_p); 2392 break; 2393 default: 2394 /* Don't list common capabilities as unknown */ 2395 if (!found) 2396 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2397 i, cap); 2398 break; 2399 } 2400 } 2401 2402 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2403 } 2404 2405 /** 2406 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2407 * @hw: pointer to the HW struct 2408 * @dev_p: pointer to device capabilities structure 2409 * @cap: capability element to parse 2410 * 2411 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2412 */ 2413 static void 2414 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2415 struct ice_aqc_list_caps_elem *cap) 2416 { 2417 u32 number = le32_to_cpu(cap->number); 2418 2419 dev_p->num_funcs = hweight32(number); 2420 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2421 dev_p->num_funcs); 2422 } 2423 2424 /** 2425 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2426 * @hw: pointer to the HW struct 2427 * @dev_p: pointer to device capabilities structure 2428 * @cap: capability element to parse 2429 * 2430 * Parse ICE_AQC_CAPS_VF for device capabilities. 2431 */ 2432 static void 2433 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2434 struct ice_aqc_list_caps_elem *cap) 2435 { 2436 u32 number = le32_to_cpu(cap->number); 2437 2438 dev_p->num_vfs_exposed = number; 2439 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2440 dev_p->num_vfs_exposed); 2441 } 2442 2443 /** 2444 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2445 * @hw: pointer to the HW struct 2446 * @dev_p: pointer to device capabilities structure 2447 * @cap: capability element to parse 2448 * 2449 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2450 */ 2451 static void 2452 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2453 struct ice_aqc_list_caps_elem *cap) 2454 { 2455 u32 number = le32_to_cpu(cap->number); 2456 2457 dev_p->num_vsi_allocd_to_host = number; 2458 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2459 dev_p->num_vsi_allocd_to_host); 2460 } 2461 2462 /** 2463 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2464 * @hw: pointer to the HW struct 2465 * @dev_p: pointer to device capabilities structure 2466 * @cap: capability element to parse 2467 * 2468 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
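 *
 * For example (bit values illustrative): a number field with
 * ICE_TS_DEV_ENA_M and ICE_TS_TMR0_ENA_M set describes a device with the
 * 1588 function enabled and timer 0 running, while ena_ports and
 * tmr_own_map are taken verbatim from logical_id and phys_id.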
2469 */ 2470 static void 2471 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2472 struct ice_aqc_list_caps_elem *cap) 2473 { 2474 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2475 u32 logical_id = le32_to_cpu(cap->logical_id); 2476 u32 phys_id = le32_to_cpu(cap->phys_id); 2477 u32 number = le32_to_cpu(cap->number); 2478 2479 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2480 dev_p->common_cap.ieee_1588 = info->ena; 2481 2482 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2483 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2484 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2485 2486 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2487 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2488 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2489 2490 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2491 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2492 2493 info->ena_ports = logical_id; 2494 info->tmr_own_map = phys_id; 2495 2496 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2497 dev_p->common_cap.ieee_1588); 2498 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2499 info->tmr0_owner); 2500 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2501 info->tmr0_owned); 2502 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2503 info->tmr0_ena); 2504 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2505 info->tmr1_owner); 2506 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2507 info->tmr1_owned); 2508 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2509 info->tmr1_ena); 2510 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2511 info->ts_ll_read); 2512 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2513 info->ts_ll_int_read); 2514 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2515 info->ena_ports); 2516 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2517 info->tmr_own_map); 2518 } 2519 2520 /** 2521 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2522 * @hw: pointer to the HW struct 2523 * @dev_p: pointer to device capabilities structure 2524 * @cap: capability element to parse 2525 * 2526 * Parse ICE_AQC_CAPS_FD for device capabilities. 2527 */ 2528 static void 2529 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2530 struct ice_aqc_list_caps_elem *cap) 2531 { 2532 u32 number = le32_to_cpu(cap->number); 2533 2534 dev_p->num_flow_director_fltr = number; 2535 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2536 dev_p->num_flow_director_fltr); 2537 } 2538 2539 /** 2540 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2541 * @hw: pointer to the HW struct 2542 * @dev_p: pointer to device capabilities structure 2543 * @cap: capability element to parse 2544 * 2545 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2546 * enabled sensors. 
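 *
 * The number field is stored verbatim as the dev_p->supported_sensors
 * bitmap; each set bit is expected to advertise one readable sensor type.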
2547 */ 2548 static void 2549 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2550 struct ice_aqc_list_caps_elem *cap) 2551 { 2552 dev_p->supported_sensors = le32_to_cpu(cap->number); 2553 2554 ice_debug(hw, ICE_DBG_INIT, 2555 "dev caps: supported sensors (bitmap) = 0x%x\n", 2556 dev_p->supported_sensors); 2557 } 2558 2559 /** 2560 * ice_parse_dev_caps - Parse device capabilities 2561 * @hw: pointer to the HW struct 2562 * @dev_p: pointer to device capabilities structure 2563 * @buf: buffer containing the device capability records 2564 * @cap_count: the number of capabilities 2565 * 2566 * Helper function to parse the device (0x000B) capabilities list. For 2567 * capabilities shared between device and function, this relies on 2568 * ice_parse_common_caps. 2569 * 2570 * Loop through the list of provided capabilities and extract the relevant 2571 * data into the device capabilities structure. 2572 */ 2573 static void 2574 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2575 void *buf, u32 cap_count) 2576 { 2577 struct ice_aqc_list_caps_elem *cap_resp; 2578 u32 i; 2579 2580 cap_resp = buf; 2581 2582 memset(dev_p, 0, sizeof(*dev_p)); 2583 2584 for (i = 0; i < cap_count; i++) { 2585 u16 cap = le16_to_cpu(cap_resp[i].cap); 2586 bool found; 2587 2588 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2589 &cap_resp[i], "dev caps"); 2590 2591 switch (cap) { 2592 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2593 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2594 break; 2595 case ICE_AQC_CAPS_VF: 2596 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2597 break; 2598 case ICE_AQC_CAPS_VSI: 2599 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2600 break; 2601 case ICE_AQC_CAPS_1588: 2602 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2603 break; 2604 case ICE_AQC_CAPS_FD: 2605 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2606 break; 2607 case ICE_AQC_CAPS_SENSOR_READING: 2608 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2609 break; 2610 default: 2611 /* Don't list common capabilities as unknown */ 2612 if (!found) 2613 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2614 i, cap); 2615 break; 2616 } 2617 } 2618 2619 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2620 } 2621 2622 /** 2623 * ice_is_pf_c827 - check if PF contains a C827 PHY 2624 * @hw: pointer to the hw struct 2625 */ 2626 bool ice_is_pf_c827(struct ice_hw *hw) 2627 { 2628 struct ice_aqc_get_link_topo cmd = {}; 2629 u8 node_part_number; 2630 u16 node_handle; 2631 int status; 2632 2633 if (hw->mac_type != ICE_MAC_E810) 2634 return false; 2635 2636 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2637 return true; 2638 2639 cmd.addr.topo_params.node_type_ctx = 2640 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2641 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2642 cmd.addr.topo_params.index = 0; 2643 2644 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2645 &node_handle); 2646 2647 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2648 return false; 2649 2650 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2651 return true; 2652 2653 return false; 2654 } 2655 2656 /** 2657 * ice_is_phy_rclk_in_netlist 2658 * @hw: pointer to the hw struct 2659 * 2660 * Check if the PHY Recovered Clock device is present in the netlist 2661 */ 2662 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2663 { 2664 if
(ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2665 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2666 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2667 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2668 return false; 2669 2670 return true; 2671 } 2672 2673 /** 2674 * ice_is_clock_mux_in_netlist 2675 * @hw: pointer to the hw struct 2676 * 2677 * Check if the Clock Multiplexer device is present in the netlist 2678 */ 2679 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2680 { 2681 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2682 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2683 NULL)) 2684 return false; 2685 2686 return true; 2687 } 2688 2689 /** 2690 * ice_is_cgu_in_netlist - check for CGU presence 2691 * @hw: pointer to the hw struct 2692 * 2693 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2694 * Save the CGU part number in the hw structure for later use. 2695 * Return: 2696 * * true - cgu is present 2697 * * false - cgu is not present 2698 */ 2699 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2700 { 2701 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2702 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2703 NULL)) { 2704 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2705 return true; 2706 } else if (!ice_find_netlist_node(hw, 2707 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2708 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2709 NULL)) { 2710 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2711 return true; 2712 } 2713 2714 return false; 2715 } 2716 2717 /** 2718 * ice_is_gps_in_netlist 2719 * @hw: pointer to the hw struct 2720 * 2721 * Check if the GPS generic device is present in the netlist 2722 */ 2723 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2724 { 2725 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2726 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2727 return false; 2728 2729 return true; 2730 } 2731 2732 /** 2733 * ice_aq_list_caps - query function/device capabilities 2734 * @hw: pointer to the HW struct 2735 * @buf: a buffer to hold the capabilities 2736 * @buf_size: size of the buffer 2737 * @cap_count: if not NULL, set to the number of capabilities reported 2738 * @opc: capabilities type to discover, device or function 2739 * @cd: pointer to command details structure or NULL 2740 * 2741 * Get the function (0x000A) or device (0x000B) capabilities description from 2742 * firmware and store it in the buffer. 2743 * 2744 * If the cap_count pointer is not NULL, then it is set to the number of 2745 * capabilities firmware will report. Note that if the buffer size is too 2746 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2747 * cap_count will still be updated in this case. It is recommended that the 2748 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2749 * firmware could return) to avoid this. 
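 *
 * Illustrative call (this mirrors ice_discover_dev_caps below):
 *
 *	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *	if (cbuf)
 *		status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN,
 *					  &cap_count, ice_aqc_opc_list_dev_caps,
 *					  NULL);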
2750 */ 2751 int 2752 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2753 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2754 { 2755 struct ice_aqc_list_caps *cmd; 2756 struct ice_aq_desc desc; 2757 int status; 2758 2759 cmd = &desc.params.get_cap; 2760 2761 if (opc != ice_aqc_opc_list_func_caps && 2762 opc != ice_aqc_opc_list_dev_caps) 2763 return -EINVAL; 2764 2765 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2766 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2767 2768 if (cap_count) 2769 *cap_count = le32_to_cpu(cmd->count); 2770 2771 return status; 2772 } 2773 2774 /** 2775 * ice_discover_dev_caps - Read and extract device capabilities 2776 * @hw: pointer to the hardware structure 2777 * @dev_caps: pointer to device capabilities structure 2778 * 2779 * Read the device capabilities and extract them into the dev_caps structure 2780 * for later use. 2781 */ 2782 int 2783 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2784 { 2785 u32 cap_count = 0; 2786 void *cbuf; 2787 int status; 2788 2789 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2790 if (!cbuf) 2791 return -ENOMEM; 2792 2793 /* Although the driver doesn't know the number of capabilities the 2794 * device will return, we can simply send a 4KB buffer, the maximum 2795 * possible size that firmware can return. 2796 */ 2797 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2798 2799 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2800 ice_aqc_opc_list_dev_caps, NULL); 2801 if (!status) 2802 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2803 kfree(cbuf); 2804 2805 return status; 2806 } 2807 2808 /** 2809 * ice_discover_func_caps - Read and extract function capabilities 2810 * @hw: pointer to the hardware structure 2811 * @func_caps: pointer to function capabilities structure 2812 * 2813 * Read the function capabilities and extract them into the func_caps structure 2814 * for later use. 2815 */ 2816 static int 2817 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2818 { 2819 u32 cap_count = 0; 2820 void *cbuf; 2821 int status; 2822 2823 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2824 if (!cbuf) 2825 return -ENOMEM; 2826 2827 /* Although the driver doesn't know the number of capabilities the 2828 * device will return, we can simply send a 4KB buffer, the maximum 2829 * possible size that firmware can return. 
2830 */ 2831 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2832 2833 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2834 ice_aqc_opc_list_func_caps, NULL); 2835 if (!status) 2836 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2837 kfree(cbuf); 2838 2839 return status; 2840 } 2841 2842 /** 2843 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2844 * @hw: pointer to the hardware structure 2845 */ 2846 void ice_set_safe_mode_caps(struct ice_hw *hw) 2847 { 2848 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2849 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2850 struct ice_hw_common_caps cached_caps; 2851 u32 num_funcs; 2852 2853 /* cache some func_caps values that should be restored after memset */ 2854 cached_caps = func_caps->common_cap; 2855 2856 /* unset func capabilities */ 2857 memset(func_caps, 0, sizeof(*func_caps)); 2858 2859 #define ICE_RESTORE_FUNC_CAP(name) \ 2860 func_caps->common_cap.name = cached_caps.name 2861 2862 /* restore cached values */ 2863 ICE_RESTORE_FUNC_CAP(valid_functions); 2864 ICE_RESTORE_FUNC_CAP(txq_first_id); 2865 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2866 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2867 ICE_RESTORE_FUNC_CAP(max_mtu); 2868 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2869 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2870 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2871 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2872 2873 /* one Tx and one Rx queue in safe mode */ 2874 func_caps->common_cap.num_rxq = 1; 2875 func_caps->common_cap.num_txq = 1; 2876 2877 /* two MSIX vectors, one for traffic and one for misc causes */ 2878 func_caps->common_cap.num_msix_vectors = 2; 2879 func_caps->guar_num_vsi = 1; 2880 2881 /* cache some dev_caps values that should be restored after memset */ 2882 cached_caps = dev_caps->common_cap; 2883 num_funcs = dev_caps->num_funcs; 2884 2885 /* unset dev capabilities */ 2886 memset(dev_caps, 0, sizeof(*dev_caps)); 2887 2888 #define ICE_RESTORE_DEV_CAP(name) \ 2889 dev_caps->common_cap.name = cached_caps.name 2890 2891 /* restore cached values */ 2892 ICE_RESTORE_DEV_CAP(valid_functions); 2893 ICE_RESTORE_DEV_CAP(txq_first_id); 2894 ICE_RESTORE_DEV_CAP(rxq_first_id); 2895 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2896 ICE_RESTORE_DEV_CAP(max_mtu); 2897 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2898 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2899 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2900 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2901 dev_caps->num_funcs = num_funcs; 2902 2903 /* one Tx and one Rx queue per function in safe mode */ 2904 dev_caps->common_cap.num_rxq = num_funcs; 2905 dev_caps->common_cap.num_txq = num_funcs; 2906 2907 /* two MSIX vectors per function */ 2908 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2909 } 2910 2911 /** 2912 * ice_get_caps - get info about the HW 2913 * @hw: pointer to the hardware structure 2914 */ 2915 int ice_get_caps(struct ice_hw *hw) 2916 { 2917 int status; 2918 2919 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2920 if (status) 2921 return status; 2922 2923 return ice_discover_func_caps(hw, &hw->func_caps); 2924 } 2925 2926 /** 2927 * ice_aq_manage_mac_write - manage MAC address write command 2928 * @hw: pointer to the HW struct 2929 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2930 * @flags: flags to control write behavior 2931 * @cd: pointer to command details structure or NULL 2932 * 2933 * This function is used to write MAC address 
to the NVM (0x0108). 2934 */ 2935 int 2936 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2937 struct ice_sq_cd *cd) 2938 { 2939 struct ice_aqc_manage_mac_write *cmd; 2940 struct ice_aq_desc desc; 2941 2942 cmd = &desc.params.mac_write; 2943 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2944 2945 cmd->flags = flags; 2946 ether_addr_copy(cmd->mac_addr, mac_addr); 2947 2948 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2949 } 2950 2951 /** 2952 * ice_aq_clear_pxe_mode 2953 * @hw: pointer to the HW struct 2954 * 2955 * Tell the firmware that the driver is taking over from PXE (0x0110). 2956 */ 2957 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2958 { 2959 struct ice_aq_desc desc; 2960 2961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2962 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2963 2964 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2965 } 2966 2967 /** 2968 * ice_clear_pxe_mode - clear pxe operations mode 2969 * @hw: pointer to the HW struct 2970 * 2971 * Make sure all PXE mode settings are cleared, including things 2972 * like descriptor fetch/write-back mode. 2973 */ 2974 void ice_clear_pxe_mode(struct ice_hw *hw) 2975 { 2976 if (ice_check_sq_alive(hw, &hw->adminq)) 2977 ice_aq_clear_pxe_mode(hw); 2978 } 2979 2980 /** 2981 * ice_aq_set_port_params - set physical port parameters. 2982 * @pi: pointer to the port info struct 2983 * @double_vlan: if set double VLAN is enabled 2984 * @cd: pointer to command details structure or NULL 2985 * 2986 * Set Physical port parameters (0x0203) 2987 */ 2988 int 2989 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2990 struct ice_sq_cd *cd) 2991 2992 { 2993 struct ice_aqc_set_port_params *cmd; 2994 struct ice_hw *hw = pi->hw; 2995 struct ice_aq_desc desc; 2996 u16 cmd_flags = 0; 2997 2998 cmd = &desc.params.set_port_params; 2999 3000 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3001 if (double_vlan) 3002 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3003 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3004 3005 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3006 } 3007 3008 /** 3009 * ice_is_100m_speed_supported 3010 * @hw: pointer to the HW struct 3011 * 3012 * returns true if 100M speeds are supported by the device, 3013 * false otherwise. 3014 */ 3015 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3016 { 3017 switch (hw->device_id) { 3018 case ICE_DEV_ID_E822C_SGMII: 3019 case ICE_DEV_ID_E822L_SGMII: 3020 case ICE_DEV_ID_E823L_1GBE: 3021 case ICE_DEV_ID_E823C_SGMII: 3022 return true; 3023 default: 3024 return false; 3025 } 3026 } 3027 3028 /** 3029 * ice_get_link_speed_based_on_phy_type - returns link speed 3030 * @phy_type_low: lower part of phy_type 3031 * @phy_type_high: higher part of phy_type 3032 * 3033 * This helper function will convert an entry in PHY type structure 3034 * [phy_type_low, phy_type_high] to its corresponding link speed. 3035 * Note: In the structure of [phy_type_low, phy_type_high], there should 3036 * be one bit set, as this function will convert one PHY type to its 3037 * speed. 
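 * For example, a phy_type_low of ICE_PHY_TYPE_LOW_25GBASE_SR maps to
 * ICE_AQ_LINK_SPEED_25GB.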
3038 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3039 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3040 */ 3041 static u16 3042 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3043 { 3044 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3045 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3046 3047 switch (phy_type_low) { 3048 case ICE_PHY_TYPE_LOW_100BASE_TX: 3049 case ICE_PHY_TYPE_LOW_100M_SGMII: 3050 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3051 break; 3052 case ICE_PHY_TYPE_LOW_1000BASE_T: 3053 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3054 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3055 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3056 case ICE_PHY_TYPE_LOW_1G_SGMII: 3057 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3058 break; 3059 case ICE_PHY_TYPE_LOW_2500BASE_T: 3060 case ICE_PHY_TYPE_LOW_2500BASE_X: 3061 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3062 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3063 break; 3064 case ICE_PHY_TYPE_LOW_5GBASE_T: 3065 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3066 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3067 break; 3068 case ICE_PHY_TYPE_LOW_10GBASE_T: 3069 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3070 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3071 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3072 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3073 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3074 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3075 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3076 break; 3077 case ICE_PHY_TYPE_LOW_25GBASE_T: 3078 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3079 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3080 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3081 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3082 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3083 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3084 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3085 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3086 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3087 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3088 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3089 break; 3090 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3091 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3092 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3093 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3094 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3095 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3096 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3097 break; 3098 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3099 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3100 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3101 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3102 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3103 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3104 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3105 case ICE_PHY_TYPE_LOW_50G_AUI2: 3106 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3107 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3108 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3109 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3110 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3111 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3112 case ICE_PHY_TYPE_LOW_50G_AUI1: 3113 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3114 break; 3115 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3116 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3117 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3118 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3119 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3120 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3121 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3122 case ICE_PHY_TYPE_LOW_100G_AUI4: 3123 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3124 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3125 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3126 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3127 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3128 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3129 
break; 3130 default: 3131 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3132 break; 3133 } 3134 3135 switch (phy_type_high) { 3136 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 3137 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 3138 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 3139 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 3140 case ICE_PHY_TYPE_HIGH_100G_AUI2: 3141 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 3142 break; 3143 default: 3144 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3145 break; 3146 } 3147 3148 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 3149 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3150 return ICE_AQ_LINK_SPEED_UNKNOWN; 3151 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3152 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 3153 return ICE_AQ_LINK_SPEED_UNKNOWN; 3154 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3155 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3156 return speed_phy_type_low; 3157 else 3158 return speed_phy_type_high; 3159 } 3160 3161 /** 3162 * ice_update_phy_type 3163 * @phy_type_low: pointer to the lower part of phy_type 3164 * @phy_type_high: pointer to the higher part of phy_type 3165 * @link_speeds_bitmap: targeted link speeds bitmap 3166 * 3167 * Note: For the link_speeds_bitmap layout, see 3168 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a 3169 * link_speeds_bitmap that includes multiple speeds. 3170 * 3171 * Each entry in the [phy_type_low, phy_type_high] structure will 3172 * represent a certain link speed. This helper function will turn on bits 3173 * in the [phy_type_low, phy_type_high] structure based on the value of 3174 * the link_speeds_bitmap input parameter. 3175 */ 3176 void 3177 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3178 u16 link_speeds_bitmap) 3179 { 3180 u64 pt_high; 3181 u64 pt_low; 3182 int index; 3183 u16 speed; 3184 3185 /* We first check with low part of phy_type */ 3186 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3187 pt_low = BIT_ULL(index); 3188 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3189 3190 if (link_speeds_bitmap & speed) 3191 *phy_type_low |= BIT_ULL(index); 3192 } 3193 3194 /* We then check with high part of phy_type */ 3195 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3196 pt_high = BIT_ULL(index); 3197 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3198 3199 if (link_speeds_bitmap & speed) 3200 *phy_type_high |= BIT_ULL(index); 3201 } 3202 } 3203 3204 /** 3205 * ice_aq_set_phy_cfg 3206 * @hw: pointer to the HW struct 3207 * @pi: port info structure of the interested logical port 3208 * @cfg: structure with PHY configuration data to be set 3209 * @cd: pointer to command details structure or NULL 3210 * 3211 * Set the various PHY configuration parameters supported on the Port. 3212 * One or more of the Set PHY config parameters may be ignored in an MFP 3213 * mode as the PF may not have the privilege to set some of the PHY Config 3214 * parameters. This status will be indicated by the command response (0x0601). 3215 */ 3216 int 3217 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3218 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3219 { 3220 struct ice_aq_desc desc; 3221 int status; 3222 3223 if (!cfg) 3224 return -EINVAL; 3225 3226 /* Ensure that only valid bits of cfg->caps can be turned on.
*/ 3227 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3228 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3229 cfg->caps); 3230 3231 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3232 } 3233 3234 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3235 desc.params.set_phy.lport_num = pi->lport; 3236 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3237 3238 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3239 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3240 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3241 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3242 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3243 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3244 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3245 cfg->low_power_ctrl_an); 3246 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3247 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3248 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3249 cfg->link_fec_opt); 3250 3251 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3252 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3253 status = 0; 3254 3255 if (!status) 3256 pi->phy.curr_user_phy_cfg = *cfg; 3257 3258 return status; 3259 } 3260 3261 /** 3262 * ice_update_link_info - update status of the HW network link 3263 * @pi: port info structure of the interested logical port 3264 */ 3265 int ice_update_link_info(struct ice_port_info *pi) 3266 { 3267 struct ice_link_status *li; 3268 int status; 3269 3270 if (!pi) 3271 return -EINVAL; 3272 3273 li = &pi->phy.link_info; 3274 3275 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3276 if (status) 3277 return status; 3278 3279 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3280 struct ice_aqc_get_phy_caps_data *pcaps; 3281 struct ice_hw *hw; 3282 3283 hw = pi->hw; 3284 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3285 GFP_KERNEL); 3286 if (!pcaps) 3287 return -ENOMEM; 3288 3289 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3290 pcaps, NULL); 3291 3292 devm_kfree(ice_hw_to_dev(hw), pcaps); 3293 } 3294 3295 return status; 3296 } 3297 3298 /** 3299 * ice_cache_phy_user_req 3300 * @pi: port information structure 3301 * @cache_data: PHY logging data 3302 * @cache_mode: PHY logging mode 3303 * 3304 * Log the user request on (FC, FEC, SPEED) for later use. 
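 *
 * Illustrative use (this is exactly what ice_cfg_phy_fc below does for
 * flow control):
 *
 *	struct ice_phy_cache_mode_data cache_data;
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);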
3305 */ 3306 static void 3307 ice_cache_phy_user_req(struct ice_port_info *pi, 3308 struct ice_phy_cache_mode_data cache_data, 3309 enum ice_phy_cache_mode cache_mode) 3310 { 3311 if (!pi) 3312 return; 3313 3314 switch (cache_mode) { 3315 case ICE_FC_MODE: 3316 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3317 break; 3318 case ICE_SPEED_MODE: 3319 pi->phy.curr_user_speed_req = 3320 cache_data.data.curr_user_speed_req; 3321 break; 3322 case ICE_FEC_MODE: 3323 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3324 break; 3325 default: 3326 break; 3327 } 3328 } 3329 3330 /** 3331 * ice_caps_to_fc_mode 3332 * @caps: PHY capabilities 3333 * 3334 * Convert PHY FC capabilities to ice FC mode 3335 */ 3336 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3337 { 3338 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3339 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3340 return ICE_FC_FULL; 3341 3342 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3343 return ICE_FC_TX_PAUSE; 3344 3345 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3346 return ICE_FC_RX_PAUSE; 3347 3348 return ICE_FC_NONE; 3349 } 3350 3351 /** 3352 * ice_caps_to_fec_mode 3353 * @caps: PHY capabilities 3354 * @fec_options: Link FEC options 3355 * 3356 * Convert PHY FEC capabilities to ice FEC mode 3357 */ 3358 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3359 { 3360 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3361 return ICE_FEC_AUTO; 3362 3363 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3364 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3365 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3366 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3367 return ICE_FEC_BASER; 3368 3369 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3370 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3371 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3372 return ICE_FEC_RS; 3373 3374 return ICE_FEC_NONE; 3375 } 3376 3377 /** 3378 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3379 * @pi: port information structure 3380 * @cfg: PHY configuration data to set FC mode 3381 * @req_mode: FC mode to configure 3382 */ 3383 int 3384 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3385 enum ice_fc_mode req_mode) 3386 { 3387 struct ice_phy_cache_mode_data cache_data; 3388 u8 pause_mask = 0x0; 3389 3390 if (!pi || !cfg) 3391 return -EINVAL; 3392 3393 switch (req_mode) { 3394 case ICE_FC_FULL: 3395 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3396 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3397 break; 3398 case ICE_FC_RX_PAUSE: 3399 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3400 break; 3401 case ICE_FC_TX_PAUSE: 3402 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3403 break; 3404 default: 3405 break; 3406 } 3407 3408 /* clear the old pause settings */ 3409 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3410 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3411 3412 /* set the new capabilities */ 3413 cfg->caps |= pause_mask; 3414 3415 /* Cache user FC request */ 3416 cache_data.data.curr_user_fc_req = req_mode; 3417 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3418 3419 return 0; 3420 } 3421 3422 /** 3423 * ice_set_fc 3424 * @pi: port information structure 3425 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3426 * @ena_auto_link_update: enable automatic link update 3427 * 3428 * Set the requested flow control mode. 
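 *
 * Hypothetical caller sketch (error handling elided; the requested mode
 * is taken from pi->fc.req_mode):
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);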
3429 */ 3430 int 3431 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3432 { 3433 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3434 struct ice_aqc_get_phy_caps_data *pcaps; 3435 struct ice_hw *hw; 3436 int status; 3437 3438 if (!pi || !aq_failures) 3439 return -EINVAL; 3440 3441 *aq_failures = 0; 3442 hw = pi->hw; 3443 3444 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 3445 if (!pcaps) 3446 return -ENOMEM; 3447 3448 /* Get the current PHY config */ 3449 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3450 pcaps, NULL); 3451 if (status) { 3452 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3453 goto out; 3454 } 3455 3456 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3457 3458 /* Configure the set PHY data */ 3459 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3460 if (status) 3461 goto out; 3462 3463 /* If the capabilities have changed, then set the new config */ 3464 if (cfg.caps != pcaps->caps) { 3465 int retry_count, retry_max = 10; 3466 3467 /* Auto restart link so settings take effect */ 3468 if (ena_auto_link_update) 3469 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3470 3471 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3472 if (status) { 3473 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3474 goto out; 3475 } 3476 3477 /* Update the link info 3478 * It sometimes takes a really long time for link to 3479 * come back from the atomic reset. Thus, we wait a 3480 * little bit. 3481 */ 3482 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3483 status = ice_update_link_info(pi); 3484 3485 if (!status) 3486 break; 3487 3488 mdelay(100); 3489 } 3490 3491 if (status) 3492 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3493 } 3494 3495 out: 3496 devm_kfree(ice_hw_to_dev(hw), pcaps); 3497 return status; 3498 } 3499 3500 /** 3501 * ice_phy_caps_equals_cfg 3502 * @phy_caps: PHY capabilities 3503 * @phy_cfg: PHY configuration 3504 * 3505 * Helper function to determine if PHY capabilities match the PHY 3506 * configuration 3507 */ 3508 bool 3509 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3510 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3511 { 3512 u8 caps_mask, cfg_mask; 3513 3514 if (!phy_caps || !phy_cfg) 3515 return false; 3516 3517 /* These bits are not common between capabilities and configuration. 3518 * Do not use them to determine equality.
3519 */ 3520 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3521 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3522 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3523 3524 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3525 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3526 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3527 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3528 phy_caps->eee_cap != phy_cfg->eee_cap || 3529 phy_caps->eeer_value != phy_cfg->eeer_value || 3530 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3531 return false; 3532 3533 return true; 3534 } 3535 3536 /** 3537 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3538 * @pi: port information structure 3539 * @caps: PHY ability structure to copy data from 3540 * @cfg: PHY configuration structure to copy data to 3541 * 3542 * Helper function to copy AQC PHY get ability data to PHY set configuration 3543 * data structure 3544 */ 3545 void 3546 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3547 struct ice_aqc_get_phy_caps_data *caps, 3548 struct ice_aqc_set_phy_cfg_data *cfg) 3549 { 3550 if (!pi || !caps || !cfg) 3551 return; 3552 3553 memset(cfg, 0, sizeof(*cfg)); 3554 cfg->phy_type_low = caps->phy_type_low; 3555 cfg->phy_type_high = caps->phy_type_high; 3556 cfg->caps = caps->caps; 3557 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3558 cfg->eee_cap = caps->eee_cap; 3559 cfg->eeer_value = caps->eeer_value; 3560 cfg->link_fec_opt = caps->link_fec_options; 3561 cfg->module_compliance_enforcement = 3562 caps->module_compliance_enforcement; 3563 } 3564 3565 /** 3566 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3567 * @pi: port information structure 3568 * @cfg: PHY configuration data to set FEC mode 3569 * @fec: FEC mode to configure 3570 */ 3571 int 3572 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3573 enum ice_fec_mode fec) 3574 { 3575 struct ice_aqc_get_phy_caps_data *pcaps; 3576 struct ice_hw *hw; 3577 int status; 3578 3579 if (!pi || !cfg) 3580 return -EINVAL; 3581 3582 hw = pi->hw; 3583 3584 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3585 if (!pcaps) 3586 return -ENOMEM; 3587 3588 status = ice_aq_get_phy_caps(pi, false, 3589 (ice_fw_supports_report_dflt_cfg(hw) ? 3590 ICE_AQC_REPORT_DFLT_CFG : 3591 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3592 if (status) 3593 goto out; 3594 3595 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3596 cfg->link_fec_opt = pcaps->link_fec_options; 3597 3598 switch (fec) { 3599 case ICE_FEC_BASER: 3600 /* Clear RS bits, and AND BASE-R ability 3601 * bits and OR request bits. 3602 */ 3603 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3604 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3605 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3606 ICE_AQC_PHY_FEC_25G_KR_REQ; 3607 break; 3608 case ICE_FEC_RS: 3609 /* Clear BASE-R bits, and AND RS ability 3610 * bits and OR request bits. 3611 */ 3612 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3613 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3614 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3615 break; 3616 case ICE_FEC_NONE: 3617 /* Clear all FEC option bits. */ 3618 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3619 break; 3620 case ICE_FEC_AUTO: 3621 /* AND auto FEC bit, and all caps bits.
		 */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * The variable link_up is true if the link is up, false if it is down.
 * The value of link_up is undefined if the return status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts Auto-Negotiation over the link.
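 *
 * A minimal usage sketch (illustrative only; assumes @pi is a valid port
 * already initialized by the driver):
 *
 *	int err = ice_aq_set_link_restart_an(pi, true, NULL);
 *
 *	if (err)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "restart AN failed, err %d\n",
 *			  err);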
3688 */ 3689 int 3690 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3691 struct ice_sq_cd *cd) 3692 { 3693 struct ice_aqc_restart_an *cmd; 3694 struct ice_aq_desc desc; 3695 3696 cmd = &desc.params.restart_an; 3697 3698 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3699 3700 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3701 cmd->lport_num = pi->lport; 3702 if (ena_link) 3703 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3704 else 3705 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3706 3707 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3708 } 3709 3710 /** 3711 * ice_aq_set_event_mask 3712 * @hw: pointer to the HW struct 3713 * @port_num: port number of the physical function 3714 * @mask: event mask to be set 3715 * @cd: pointer to command details structure or NULL 3716 * 3717 * Set event mask (0x0613) 3718 */ 3719 int 3720 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3721 struct ice_sq_cd *cd) 3722 { 3723 struct ice_aqc_set_event_mask *cmd; 3724 struct ice_aq_desc desc; 3725 3726 cmd = &desc.params.set_event_mask; 3727 3728 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3729 3730 cmd->lport_num = port_num; 3731 3732 cmd->event_mask = cpu_to_le16(mask); 3733 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3734 } 3735 3736 /** 3737 * ice_aq_set_mac_loopback 3738 * @hw: pointer to the HW struct 3739 * @ena_lpbk: Enable or Disable loopback 3740 * @cd: pointer to command details structure or NULL 3741 * 3742 * Enable/disable loopback on a given port 3743 */ 3744 int 3745 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3746 { 3747 struct ice_aqc_set_mac_lb *cmd; 3748 struct ice_aq_desc desc; 3749 3750 cmd = &desc.params.set_mac_lb; 3751 3752 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3753 if (ena_lpbk) 3754 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3755 3756 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3757 } 3758 3759 /** 3760 * ice_aq_set_port_id_led 3761 * @pi: pointer to the port information 3762 * @is_orig_mode: is this LED set to original mode (by the net-list) 3763 * @cd: pointer to command details structure or NULL 3764 * 3765 * Set LED value for the given port (0x06e9) 3766 */ 3767 int 3768 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3769 struct ice_sq_cd *cd) 3770 { 3771 struct ice_aqc_set_port_id_led *cmd; 3772 struct ice_hw *hw = pi->hw; 3773 struct ice_aq_desc desc; 3774 3775 cmd = &desc.params.set_port_id_led; 3776 3777 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3778 3779 if (is_orig_mode) 3780 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3781 else 3782 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3783 3784 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3785 } 3786 3787 /** 3788 * ice_aq_get_port_options 3789 * @hw: pointer to the HW struct 3790 * @options: buffer for the resultant port options 3791 * @option_count: input - size of the buffer in port options structures, 3792 * output - number of returned port options 3793 * @lport: logical port to call the command with (optional) 3794 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3795 * when PF owns more than 1 port it must be true 3796 * @active_option_idx: index of active port option in returned buffer 3797 * @active_option_valid: active option in returned buffer is valid 3798 * @pending_option_idx: index of pending port option in returned buffer 3799 * @pending_option_valid: pending option in returned buffer 
is valid 3800 * 3801 * Calls Get Port Options AQC (0x06ea) and verifies result. 3802 */ 3803 int 3804 ice_aq_get_port_options(struct ice_hw *hw, 3805 struct ice_aqc_get_port_options_elem *options, 3806 u8 *option_count, u8 lport, bool lport_valid, 3807 u8 *active_option_idx, bool *active_option_valid, 3808 u8 *pending_option_idx, bool *pending_option_valid) 3809 { 3810 struct ice_aqc_get_port_options *cmd; 3811 struct ice_aq_desc desc; 3812 int status; 3813 u8 i; 3814 3815 /* options buffer shall be able to hold max returned options */ 3816 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3817 return -EINVAL; 3818 3819 cmd = &desc.params.get_port_options; 3820 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3821 3822 if (lport_valid) 3823 cmd->lport_num = lport; 3824 cmd->lport_num_valid = lport_valid; 3825 3826 status = ice_aq_send_cmd(hw, &desc, options, 3827 *option_count * sizeof(*options), NULL); 3828 if (status) 3829 return status; 3830 3831 /* verify direct FW response & set output parameters */ 3832 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 3833 cmd->port_options_count); 3834 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 3835 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 3836 cmd->port_options); 3837 if (*active_option_valid) { 3838 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 3839 cmd->port_options); 3840 if (*active_option_idx > (*option_count - 1)) 3841 return -EIO; 3842 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 3843 *active_option_idx); 3844 } 3845 3846 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 3847 cmd->pending_port_option_status); 3848 if (*pending_option_valid) { 3849 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 3850 cmd->pending_port_option_status); 3851 if (*pending_option_idx > (*option_count - 1)) 3852 return -EIO; 3853 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 3854 *pending_option_idx); 3855 } 3856 3857 /* mask output options fields */ 3858 for (i = 0; i < *option_count; i++) { 3859 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 3860 options[i].pmd); 3861 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 3862 options[i].max_lane_speed); 3863 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 3864 options[i].pmd, options[i].max_lane_speed); 3865 } 3866 3867 return 0; 3868 } 3869 3870 /** 3871 * ice_aq_set_port_option 3872 * @hw: pointer to the HW struct 3873 * @lport: logical port to call the command with 3874 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3875 * when PF owns more than 1 port it must be true 3876 * @new_option: new port option to be written 3877 * 3878 * Calls Set Port Options AQC (0x06eb). 
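 *
 * Illustrative sketch (err, lport and option_idx are placeholders; the index
 * would first be validated against ice_aq_get_port_options() output for the
 * same port):
 *
 *	err = ice_aq_set_port_option(hw, lport, true, option_idx);
 *	if (err)
 *		ice_debug(hw, ICE_DBG_PHY, "set port option failed, err %d\n",
 *			  err);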
3879 */ 3880 int 3881 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 3882 u8 new_option) 3883 { 3884 struct ice_aqc_set_port_option *cmd; 3885 struct ice_aq_desc desc; 3886 3887 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 3888 return -EINVAL; 3889 3890 cmd = &desc.params.set_port_option; 3891 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 3892 3893 if (lport_valid) 3894 cmd->lport_num = lport; 3895 3896 cmd->lport_num_valid = lport_valid; 3897 cmd->selected_port_option = new_option; 3898 3899 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3900 } 3901 3902 /** 3903 * ice_aq_sff_eeprom 3904 * @hw: pointer to the HW struct 3905 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3906 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3907 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3908 * @page: QSFP page 3909 * @set_page: set or ignore the page 3910 * @data: pointer to data buffer to be read/written to the I2C device. 3911 * @length: 1-16 for read, 1 for write. 3912 * @write: 0 read, 1 for write. 3913 * @cd: pointer to command details structure or NULL 3914 * 3915 * Read/Write SFF EEPROM (0x06EE) 3916 */ 3917 int 3918 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3919 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3920 bool write, struct ice_sq_cd *cd) 3921 { 3922 struct ice_aqc_sff_eeprom *cmd; 3923 struct ice_aq_desc desc; 3924 u16 i2c_bus_addr; 3925 int status; 3926 3927 if (!data || (mem_addr & 0xff00)) 3928 return -EINVAL; 3929 3930 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3931 cmd = &desc.params.read_write_sff_param; 3932 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 3933 cmd->lport_num = (u8)(lport & 0xff); 3934 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3935 i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) | 3936 FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page); 3937 if (write) 3938 i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE; 3939 cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr); 3940 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3941 cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M); 3942 3943 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3944 return status; 3945 } 3946 3947 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) 3948 { 3949 switch (type) { 3950 case ICE_LUT_VSI: 3951 return ICE_LUT_VSI_SIZE; 3952 case ICE_LUT_GLOBAL: 3953 return ICE_LUT_GLOBAL_SIZE; 3954 case ICE_LUT_PF: 3955 return ICE_LUT_PF_SIZE; 3956 } 3957 WARN_ONCE(1, "incorrect type passed"); 3958 return ICE_LUT_VSI_SIZE; 3959 } 3960 3961 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) 3962 { 3963 switch (size) { 3964 case ICE_LUT_VSI_SIZE: 3965 return ICE_AQC_LUT_SIZE_SMALL; 3966 case ICE_LUT_GLOBAL_SIZE: 3967 return ICE_AQC_LUT_SIZE_512; 3968 case ICE_LUT_PF_SIZE: 3969 return ICE_AQC_LUT_SIZE_2K; 3970 } 3971 WARN_ONCE(1, "incorrect size passed"); 3972 return 0; 3973 } 3974 3975 /** 3976 * __ice_aq_get_set_rss_lut 3977 * @hw: pointer to the hardware structure 3978 * @params: RSS LUT parameters 3979 * @set: set true to set the table, false to get the table 3980 * 3981 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3982 */ 3983 static int 3984 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 3985 struct ice_aq_get_set_rss_lut_params *params, bool set) 3986 { 3987 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 3988 enum 
ice_lut_type lut_type = params->lut_type; 3989 struct ice_aqc_get_set_rss_lut *desc_params; 3990 enum ice_aqc_lut_flags flags; 3991 enum ice_lut_size lut_size; 3992 struct ice_aq_desc desc; 3993 u8 *lut = params->lut; 3994 3995 3996 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 3997 return -EINVAL; 3998 3999 lut_size = ice_lut_type_to_size(lut_type); 4000 if (lut_size > params->lut_size) 4001 return -EINVAL; 4002 else if (set && lut_size != params->lut_size) 4003 return -EINVAL; 4004 4005 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4006 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4007 if (set) 4008 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4009 4010 desc_params = &desc.params.get_set_rss_lut; 4011 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4012 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4013 4014 if (lut_type == ICE_LUT_GLOBAL) 4015 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4016 params->global_lut_id); 4017 4018 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4019 desc_params->flags = cpu_to_le16(flags); 4020 4021 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4022 } 4023 4024 /** 4025 * ice_aq_get_rss_lut 4026 * @hw: pointer to the hardware structure 4027 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4028 * 4029 * get the RSS lookup table, PF or VSI type 4030 */ 4031 int 4032 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4033 { 4034 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4035 } 4036 4037 /** 4038 * ice_aq_set_rss_lut 4039 * @hw: pointer to the hardware structure 4040 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4041 * 4042 * set the RSS lookup table, PF or VSI type 4043 */ 4044 int 4045 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4046 { 4047 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4048 } 4049 4050 /** 4051 * __ice_aq_get_set_rss_key 4052 * @hw: pointer to the HW struct 4053 * @vsi_id: VSI FW index 4054 * @key: pointer to key info struct 4055 * @set: set true to set the key, false to get the key 4056 * 4057 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4058 */ 4059 static int 4060 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4061 struct ice_aqc_get_set_rss_keys *key, bool set) 4062 { 4063 struct ice_aqc_get_set_rss_key *desc_params; 4064 u16 key_size = sizeof(*key); 4065 struct ice_aq_desc desc; 4066 4067 if (set) { 4068 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4069 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4070 } else { 4071 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4072 } 4073 4074 desc_params = &desc.params.get_set_rss_key; 4075 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4076 4077 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4078 } 4079 4080 /** 4081 * ice_aq_get_rss_key 4082 * @hw: pointer to the HW struct 4083 * @vsi_handle: software VSI handle 4084 * @key: pointer to key info struct 4085 * 4086 * get the RSS key per VSI 4087 */ 4088 int 4089 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4090 struct ice_aqc_get_set_rss_keys *key) 4091 { 4092 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4093 return -EINVAL; 4094 4095 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4096 key, false); 4097 } 4098 4099 /** 4100 * ice_aq_set_rss_key 4101 * @hw: pointer to the HW struct 4102 * @vsi_handle: software VSI handle 
4103 * @keys: pointer to key info struct 4104 * 4105 * set the RSS key per VSI 4106 */ 4107 int 4108 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4109 struct ice_aqc_get_set_rss_keys *keys) 4110 { 4111 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4112 return -EINVAL; 4113 4114 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4115 keys, true); 4116 } 4117 4118 /** 4119 * ice_aq_add_lan_txq 4120 * @hw: pointer to the hardware structure 4121 * @num_qgrps: Number of added queue groups 4122 * @qg_list: list of queue groups to be added 4123 * @buf_size: size of buffer for indirect command 4124 * @cd: pointer to command details structure or NULL 4125 * 4126 * Add Tx LAN queue (0x0C30) 4127 * 4128 * NOTE: 4129 * Prior to calling add Tx LAN queue: 4130 * Initialize the following as part of the Tx queue context: 4131 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4132 * Cache profile and Packet shaper profile. 4133 * 4134 * After add Tx LAN queue AQ command is completed: 4135 * Interrupts should be associated with specific queues, 4136 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4137 * flow. 4138 */ 4139 static int 4140 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4141 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4142 struct ice_sq_cd *cd) 4143 { 4144 struct ice_aqc_add_tx_qgrp *list; 4145 struct ice_aqc_add_txqs *cmd; 4146 struct ice_aq_desc desc; 4147 u16 i, sum_size = 0; 4148 4149 cmd = &desc.params.add_txqs; 4150 4151 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4152 4153 if (!qg_list) 4154 return -EINVAL; 4155 4156 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4157 return -EINVAL; 4158 4159 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4160 sum_size += struct_size(list, txqs, list->num_txqs); 4161 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4162 list->num_txqs); 4163 } 4164 4165 if (buf_size != sum_size) 4166 return -EINVAL; 4167 4168 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4169 4170 cmd->num_qgrps = num_qgrps; 4171 4172 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4173 } 4174 4175 /** 4176 * ice_aq_dis_lan_txq 4177 * @hw: pointer to the hardware structure 4178 * @num_qgrps: number of groups in the list 4179 * @qg_list: the list of groups to disable 4180 * @buf_size: the total size of the qg_list buffer in bytes 4181 * @rst_src: if called due to reset, specifies the reset source 4182 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4183 * @cd: pointer to command details structure or NULL 4184 * 4185 * Disable LAN Tx queue (0x0C31) 4186 */ 4187 static int 4188 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4189 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4190 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4191 struct ice_sq_cd *cd) 4192 { 4193 struct ice_aqc_dis_txq_item *item; 4194 struct ice_aqc_dis_txqs *cmd; 4195 struct ice_aq_desc desc; 4196 u16 vmvf_and_timeout; 4197 u16 i, sz = 0; 4198 int status; 4199 4200 cmd = &desc.params.dis_txqs; 4201 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4202 4203 /* qg_list can be NULL only in VM/VF reset flow */ 4204 if (!qg_list && !rst_src) 4205 return -EINVAL; 4206 4207 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4208 return -EINVAL; 4209 4210 cmd->num_entries = num_qgrps; 4211 4212 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); 4213 4214 switch (rst_src) { 4215 case ICE_VM_RESET: 4216 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4217 vmvf_and_timeout |= 
vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; 4218 break; 4219 case ICE_VF_RESET: 4220 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4221 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4222 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & 4223 ICE_AQC_Q_DIS_VMVF_NUM_M; 4224 break; 4225 case ICE_NO_RESET: 4226 default: 4227 break; 4228 } 4229 4230 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); 4231 4232 /* flush pipe on time out */ 4233 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4234 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4235 if (!qg_list) 4236 goto do_aq; 4237 4238 /* set RD bit to indicate that command buffer is provided by the driver 4239 * and it needs to be read by the firmware 4240 */ 4241 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4242 4243 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4244 u16 item_size = struct_size(item, q_id, item->num_qs); 4245 4246 /* If the num of queues is even, add 2 bytes of padding */ 4247 if ((item->num_qs % 2) == 0) 4248 item_size += 2; 4249 4250 sz += item_size; 4251 4252 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4253 } 4254 4255 if (buf_size != sz) 4256 return -EINVAL; 4257 4258 do_aq: 4259 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4260 if (status) { 4261 if (!qg_list) 4262 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4263 vmvf_num, hw->adminq.sq_last_status); 4264 else 4265 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4266 le16_to_cpu(qg_list[0].q_id[0]), 4267 hw->adminq.sq_last_status); 4268 } 4269 return status; 4270 } 4271 4272 /** 4273 * ice_aq_cfg_lan_txq 4274 * @hw: pointer to the hardware structure 4275 * @buf: buffer for command 4276 * @buf_size: size of buffer in bytes 4277 * @num_qs: number of queues being configured 4278 * @oldport: origination lport 4279 * @newport: destination lport 4280 * @cd: pointer to command details structure or NULL 4281 * 4282 * Move/Configure LAN Tx queue (0x0C32) 4283 * 4284 * There is a better AQ command to use for moving nodes, so only coding 4285 * this one for configuring the node. 
4286 */ 4287 int 4288 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4289 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4290 struct ice_sq_cd *cd) 4291 { 4292 struct ice_aqc_cfg_txqs *cmd; 4293 struct ice_aq_desc desc; 4294 int status; 4295 4296 cmd = &desc.params.cfg_txqs; 4297 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4298 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4299 4300 if (!buf) 4301 return -EINVAL; 4302 4303 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4304 cmd->num_qs = num_qs; 4305 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4306 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); 4307 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); 4308 cmd->blocked_cgds = 0; 4309 4310 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4311 if (status) 4312 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4313 hw->adminq.sq_last_status); 4314 return status; 4315 } 4316 4317 /** 4318 * ice_aq_add_rdma_qsets 4319 * @hw: pointer to the hardware structure 4320 * @num_qset_grps: Number of RDMA Qset groups 4321 * @qset_list: list of Qset groups to be added 4322 * @buf_size: size of buffer for indirect command 4323 * @cd: pointer to command details structure or NULL 4324 * 4325 * Add Tx RDMA Qsets (0x0C33) 4326 */ 4327 static int 4328 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4329 struct ice_aqc_add_rdma_qset_data *qset_list, 4330 u16 buf_size, struct ice_sq_cd *cd) 4331 { 4332 struct ice_aqc_add_rdma_qset_data *list; 4333 struct ice_aqc_add_rdma_qset *cmd; 4334 struct ice_aq_desc desc; 4335 u16 i, sum_size = 0; 4336 4337 cmd = &desc.params.add_rdma_qset; 4338 4339 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4340 4341 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4342 return -EINVAL; 4343 4344 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4345 u16 num_qsets = le16_to_cpu(list->num_qsets); 4346 4347 sum_size += struct_size(list, rdma_qsets, num_qsets); 4348 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4349 num_qsets); 4350 } 4351 4352 if (buf_size != sum_size) 4353 return -EINVAL; 4354 4355 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4356 4357 cmd->num_qset_grps = num_qset_grps; 4358 4359 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4360 } 4361 4362 /* End of FW Admin Queue command wrappers */ 4363 4364 /** 4365 * ice_write_byte - write a byte to a packed context structure 4366 * @src_ctx: the context structure to read from 4367 * @dest_ctx: the context to be written to 4368 * @ce_info: a description of the struct to be filled 4369 */ 4370 static void 4371 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4372 { 4373 u8 src_byte, dest_byte, mask; 4374 u8 *from, *dest; 4375 u16 shift_width; 4376 4377 /* copy from the next struct field */ 4378 from = src_ctx + ce_info->offset; 4379 4380 /* prepare the bits and mask */ 4381 shift_width = ce_info->lsb % 8; 4382 mask = (u8)(BIT(ce_info->width) - 1); 4383 4384 src_byte = *from; 4385 src_byte &= mask; 4386 4387 /* shift to correct alignment */ 4388 mask <<= shift_width; 4389 src_byte <<= shift_width; 4390 4391 /* get the current bits from the target bit string */ 4392 dest = dest_ctx + (ce_info->lsb / 8); 4393 4394 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4395 4396 dest_byte &= ~mask; /* get the bits not changing */ 4397 dest_byte |= src_byte; /* add in the new bits */ 4398 4399 /* put it all back */ 4400 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4401 
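	/* Worked example (illustrative): for a field with width = 3 and
	 * lsb = 11, shift_width is 3 and the mask becomes 0x07 << 3 = 0x38;
	 * the three source bits land in bits 5:3 of the destination byte at
	 * offset lsb / 8 = 1, while every other bit of that byte is kept.
	 */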
} 4402 4403 /** 4404 * ice_write_word - write a word to a packed context structure 4405 * @src_ctx: the context structure to read from 4406 * @dest_ctx: the context to be written to 4407 * @ce_info: a description of the struct to be filled 4408 */ 4409 static void 4410 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4411 { 4412 u16 src_word, mask; 4413 __le16 dest_word; 4414 u8 *from, *dest; 4415 u16 shift_width; 4416 4417 /* copy from the next struct field */ 4418 from = src_ctx + ce_info->offset; 4419 4420 /* prepare the bits and mask */ 4421 shift_width = ce_info->lsb % 8; 4422 mask = BIT(ce_info->width) - 1; 4423 4424 /* don't swizzle the bits until after the mask because the mask bits 4425 * will be in a different bit position on big endian machines 4426 */ 4427 src_word = *(u16 *)from; 4428 src_word &= mask; 4429 4430 /* shift to correct alignment */ 4431 mask <<= shift_width; 4432 src_word <<= shift_width; 4433 4434 /* get the current bits from the target bit string */ 4435 dest = dest_ctx + (ce_info->lsb / 8); 4436 4437 memcpy(&dest_word, dest, sizeof(dest_word)); 4438 4439 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4440 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4441 4442 /* put it all back */ 4443 memcpy(dest, &dest_word, sizeof(dest_word)); 4444 } 4445 4446 /** 4447 * ice_write_dword - write a dword to a packed context structure 4448 * @src_ctx: the context structure to read from 4449 * @dest_ctx: the context to be written to 4450 * @ce_info: a description of the struct to be filled 4451 */ 4452 static void 4453 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4454 { 4455 u32 src_dword, mask; 4456 __le32 dest_dword; 4457 u8 *from, *dest; 4458 u16 shift_width; 4459 4460 /* copy from the next struct field */ 4461 from = src_ctx + ce_info->offset; 4462 4463 /* prepare the bits and mask */ 4464 shift_width = ce_info->lsb % 8; 4465 4466 /* if the field width is exactly 32 on an x86 machine, then the shift 4467 * operation will not work because the SHL instructions count is masked 4468 * to 5 bits so the shift will do nothing 4469 */ 4470 if (ce_info->width < 32) 4471 mask = BIT(ce_info->width) - 1; 4472 else 4473 mask = (u32)~0; 4474 4475 /* don't swizzle the bits until after the mask because the mask bits 4476 * will be in a different bit position on big endian machines 4477 */ 4478 src_dword = *(u32 *)from; 4479 src_dword &= mask; 4480 4481 /* shift to correct alignment */ 4482 mask <<= shift_width; 4483 src_dword <<= shift_width; 4484 4485 /* get the current bits from the target bit string */ 4486 dest = dest_ctx + (ce_info->lsb / 8); 4487 4488 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4489 4490 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4491 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4492 4493 /* put it all back */ 4494 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4495 } 4496 4497 /** 4498 * ice_write_qword - write a qword to a packed context structure 4499 * @src_ctx: the context structure to read from 4500 * @dest_ctx: the context to be written to 4501 * @ce_info: a description of the struct to be filled 4502 */ 4503 static void 4504 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4505 { 4506 u64 src_qword, mask; 4507 __le64 dest_qword; 4508 u8 *from, *dest; 4509 u16 shift_width; 4510 4511 /* copy from the next struct field */ 4512 from = src_ctx + ce_info->offset; 4513 4514 /* prepare the bits 
and mask */ 4515 shift_width = ce_info->lsb % 8; 4516 4517 /* if the field width is exactly 64 on an x86 machine, then the shift 4518 * operation will not work because the SHL instructions count is masked 4519 * to 6 bits so the shift will do nothing 4520 */ 4521 if (ce_info->width < 64) 4522 mask = BIT_ULL(ce_info->width) - 1; 4523 else 4524 mask = (u64)~0; 4525 4526 /* don't swizzle the bits until after the mask because the mask bits 4527 * will be in a different bit position on big endian machines 4528 */ 4529 src_qword = *(u64 *)from; 4530 src_qword &= mask; 4531 4532 /* shift to correct alignment */ 4533 mask <<= shift_width; 4534 src_qword <<= shift_width; 4535 4536 /* get the current bits from the target bit string */ 4537 dest = dest_ctx + (ce_info->lsb / 8); 4538 4539 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4540 4541 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4542 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4543 4544 /* put it all back */ 4545 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4546 } 4547 4548 /** 4549 * ice_set_ctx - set context bits in packed structure 4550 * @hw: pointer to the hardware structure 4551 * @src_ctx: pointer to a generic non-packed context structure 4552 * @dest_ctx: pointer to memory for the packed structure 4553 * @ce_info: a description of the structure to be transformed 4554 */ 4555 int 4556 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4557 const struct ice_ctx_ele *ce_info) 4558 { 4559 int f; 4560 4561 for (f = 0; ce_info[f].width; f++) { 4562 /* We have to deal with each element of the FW response 4563 * using the correct size so that we are correct regardless 4564 * of the endianness of the machine. 4565 */ 4566 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4567 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated
	 *   by Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
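	 * The CIR and EIR sections are likewise marked valid below and are
	 * programmed with the default rate-limiter profile ID and the
	 * default bandwidth weight.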
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* If the queue is already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information.
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if
(status) 4788 break; 4789 ice_free_sched_node(pi, node); 4790 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4791 q_ctx->q_teid = ICE_INVAL_TEID; 4792 } 4793 mutex_unlock(&pi->sched_lock); 4794 return status; 4795 } 4796 4797 /** 4798 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4799 * @pi: port information structure 4800 * @vsi_handle: software VSI handle 4801 * @tc_bitmap: TC bitmap 4802 * @maxqs: max queues array per TC 4803 * @owner: LAN or RDMA 4804 * 4805 * This function adds/updates the VSI queues per TC. 4806 */ 4807 static int 4808 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4809 u16 *maxqs, u8 owner) 4810 { 4811 int status = 0; 4812 u8 i; 4813 4814 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4815 return -EIO; 4816 4817 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4818 return -EINVAL; 4819 4820 mutex_lock(&pi->sched_lock); 4821 4822 ice_for_each_traffic_class(i) { 4823 /* configuration is possible only if TC node is present */ 4824 if (!ice_sched_get_tc_node(pi, i)) 4825 continue; 4826 4827 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4828 ice_is_tc_ena(tc_bitmap, i)); 4829 if (status) 4830 break; 4831 } 4832 4833 mutex_unlock(&pi->sched_lock); 4834 return status; 4835 } 4836 4837 /** 4838 * ice_cfg_vsi_lan - configure VSI LAN queues 4839 * @pi: port information structure 4840 * @vsi_handle: software VSI handle 4841 * @tc_bitmap: TC bitmap 4842 * @max_lanqs: max LAN queues array per TC 4843 * 4844 * This function adds/updates the VSI LAN queues per TC. 4845 */ 4846 int 4847 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4848 u16 *max_lanqs) 4849 { 4850 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4851 ICE_SCHED_NODE_OWNER_LAN); 4852 } 4853 4854 /** 4855 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4856 * @pi: port information structure 4857 * @vsi_handle: software VSI handle 4858 * @tc_bitmap: TC bitmap 4859 * @max_rdmaqs: max RDMA queues array per TC 4860 * 4861 * This function adds/updates the VSI RDMA queues per TC. 
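 *
 * Illustrative sketch (values are placeholders, not recommendations; assumes
 * the usual ICE_MAX_TRAFFIC_CLASS array bound and RDMA qsets on TC 0 only):
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 4 };
 *
 *	err = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);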
4862 */ 4863 int 4864 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4865 u16 *max_rdmaqs) 4866 { 4867 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4868 ICE_SCHED_NODE_OWNER_RDMA); 4869 } 4870 4871 /** 4872 * ice_ena_vsi_rdma_qset 4873 * @pi: port information structure 4874 * @vsi_handle: software VSI handle 4875 * @tc: TC number 4876 * @rdma_qset: pointer to RDMA Qset 4877 * @num_qsets: number of RDMA Qsets 4878 * @qset_teid: pointer to Qset node TEIDs 4879 * 4880 * This function adds RDMA Qset 4881 */ 4882 int 4883 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4884 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4885 { 4886 struct ice_aqc_txsched_elem_data node = { 0 }; 4887 struct ice_aqc_add_rdma_qset_data *buf; 4888 struct ice_sched_node *parent; 4889 struct ice_hw *hw; 4890 u16 i, buf_size; 4891 int ret; 4892 4893 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4894 return -EIO; 4895 hw = pi->hw; 4896 4897 if (!ice_is_vsi_valid(hw, vsi_handle)) 4898 return -EINVAL; 4899 4900 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4901 buf = kzalloc(buf_size, GFP_KERNEL); 4902 if (!buf) 4903 return -ENOMEM; 4904 mutex_lock(&pi->sched_lock); 4905 4906 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4907 ICE_SCHED_NODE_OWNER_RDMA); 4908 if (!parent) { 4909 ret = -EINVAL; 4910 goto rdma_error_exit; 4911 } 4912 buf->parent_teid = parent->info.node_teid; 4913 node.parent_teid = parent->info.node_teid; 4914 4915 buf->num_qsets = cpu_to_le16(num_qsets); 4916 for (i = 0; i < num_qsets; i++) { 4917 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4918 buf->rdma_qsets[i].info.valid_sections = 4919 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4920 ICE_AQC_ELEM_VALID_EIR; 4921 buf->rdma_qsets[i].info.generic = 0; 4922 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4923 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4924 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4925 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4926 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4927 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4928 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4929 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4930 } 4931 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4932 if (ret) { 4933 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4934 goto rdma_error_exit; 4935 } 4936 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4937 for (i = 0; i < num_qsets; i++) { 4938 node.node_teid = buf->rdma_qsets[i].qset_teid; 4939 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4940 &node, NULL); 4941 if (ret) 4942 break; 4943 qset_teid[i] = le32_to_cpu(node.node_teid); 4944 } 4945 rdma_error_exit: 4946 mutex_unlock(&pi->sched_lock); 4947 kfree(buf); 4948 return ret; 4949 } 4950 4951 /** 4952 * ice_dis_vsi_rdma_qset - free RDMA resources 4953 * @pi: port_info struct 4954 * @count: number of RDMA Qsets to free 4955 * @qset_teid: TEID of Qset node 4956 * @q_id: list of queue IDs being disabled 4957 */ 4958 int 4959 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4960 u16 *q_id) 4961 { 4962 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4963 u16 qg_size = __struct_size(qg_list); 4964 struct ice_hw *hw; 4965 int status = 0; 4966 int i; 4967 4968 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4969 return -EIO; 4970 4971 hw = pi->hw; 4972 4973 mutex_lock(&pi->sched_lock); 4974 4975 for (i = 0; i < count; i++) { 4976 struct ice_sched_node *node; 4977 4978 node = 
ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4979 if (!node) 4980 continue; 4981 4982 qg_list->parent_teid = node->info.parent_teid; 4983 qg_list->num_qs = 1; 4984 qg_list->q_id[0] = 4985 cpu_to_le16(q_id[i] | 4986 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4987 4988 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4989 ICE_NO_RESET, 0, NULL); 4990 if (status) 4991 break; 4992 4993 ice_free_sched_node(pi, node); 4994 } 4995 4996 mutex_unlock(&pi->sched_lock); 4997 return status; 4998 } 4999 5000 /** 5001 * ice_aq_get_cgu_abilities - get cgu abilities 5002 * @hw: pointer to the HW struct 5003 * @abilities: CGU abilities 5004 * 5005 * Get CGU abilities (0x0C61) 5006 * Return: 0 on success or negative value on failure. 5007 */ 5008 int 5009 ice_aq_get_cgu_abilities(struct ice_hw *hw, 5010 struct ice_aqc_get_cgu_abilities *abilities) 5011 { 5012 struct ice_aq_desc desc; 5013 5014 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 5015 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 5016 } 5017 5018 /** 5019 * ice_aq_set_input_pin_cfg - set input pin config 5020 * @hw: pointer to the HW struct 5021 * @input_idx: Input index 5022 * @flags1: Input flags 5023 * @flags2: Input flags 5024 * @freq: Frequency in Hz 5025 * @phase_delay: Delay in ps 5026 * 5027 * Set CGU input config (0x0C62) 5028 * Return: 0 on success or negative value on failure. 5029 */ 5030 int 5031 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 5032 u32 freq, s32 phase_delay) 5033 { 5034 struct ice_aqc_set_cgu_input_config *cmd; 5035 struct ice_aq_desc desc; 5036 5037 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 5038 cmd = &desc.params.set_cgu_input_config; 5039 cmd->input_idx = input_idx; 5040 cmd->flags1 = flags1; 5041 cmd->flags2 = flags2; 5042 cmd->freq = cpu_to_le32(freq); 5043 cmd->phase_delay = cpu_to_le32(phase_delay); 5044 5045 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5046 } 5047 5048 /** 5049 * ice_aq_get_input_pin_cfg - get input pin config 5050 * @hw: pointer to the HW struct 5051 * @input_idx: Input index 5052 * @status: Pin status 5053 * @type: Pin type 5054 * @flags1: Input flags 5055 * @flags2: Input flags 5056 * @freq: Frequency in Hz 5057 * @phase_delay: Delay in ps 5058 * 5059 * Get CGU input config (0x0C63) 5060 * Return: 0 on success or negative value on failure. 
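 *
 * Illustrative sketch (reads back only the frequency of input pin 0; each
 * out-parameter is optional and may be NULL):
 *
 *	u32 freq;
 *
 *	err = ice_aq_get_input_pin_cfg(hw, 0, NULL, NULL, NULL, NULL,
 *				       &freq, NULL);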
5061 */ 5062 int 5063 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5064 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5065 { 5066 struct ice_aqc_get_cgu_input_config *cmd; 5067 struct ice_aq_desc desc; 5068 int ret; 5069 5070 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5071 cmd = &desc.params.get_cgu_input_config; 5072 cmd->input_idx = input_idx; 5073 5074 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5075 if (!ret) { 5076 if (status) 5077 *status = cmd->status; 5078 if (type) 5079 *type = cmd->type; 5080 if (flags1) 5081 *flags1 = cmd->flags1; 5082 if (flags2) 5083 *flags2 = cmd->flags2; 5084 if (freq) 5085 *freq = le32_to_cpu(cmd->freq); 5086 if (phase_delay) 5087 *phase_delay = le32_to_cpu(cmd->phase_delay); 5088 } 5089 5090 return ret; 5091 } 5092 5093 /** 5094 * ice_aq_set_output_pin_cfg - set output pin config 5095 * @hw: pointer to the HW struct 5096 * @output_idx: Output index 5097 * @flags: Output flags 5098 * @src_sel: Index of DPLL block 5099 * @freq: Output frequency 5100 * @phase_delay: Output phase compensation 5101 * 5102 * Set CGU output config (0x0C64) 5103 * Return: 0 on success or negative value on failure. 5104 */ 5105 int 5106 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5107 u8 src_sel, u32 freq, s32 phase_delay) 5108 { 5109 struct ice_aqc_set_cgu_output_config *cmd; 5110 struct ice_aq_desc desc; 5111 5112 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5113 cmd = &desc.params.set_cgu_output_config; 5114 cmd->output_idx = output_idx; 5115 cmd->flags = flags; 5116 cmd->src_sel = src_sel; 5117 cmd->freq = cpu_to_le32(freq); 5118 cmd->phase_delay = cpu_to_le32(phase_delay); 5119 5120 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5121 } 5122 5123 /** 5124 * ice_aq_get_output_pin_cfg - get output pin config 5125 * @hw: pointer to the HW struct 5126 * @output_idx: Output index 5127 * @flags: Output flags 5128 * @src_sel: Internal DPLL source 5129 * @freq: Output frequency 5130 * @src_freq: Source frequency 5131 * 5132 * Get CGU output config (0x0C65) 5133 * Return: 0 on success or negative value on failure. 5134 */ 5135 int 5136 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5137 u8 *src_sel, u32 *freq, u32 *src_freq) 5138 { 5139 struct ice_aqc_get_cgu_output_config *cmd; 5140 struct ice_aq_desc desc; 5141 int ret; 5142 5143 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5144 cmd = &desc.params.get_cgu_output_config; 5145 cmd->output_idx = output_idx; 5146 5147 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5148 if (!ret) { 5149 if (flags) 5150 *flags = cmd->flags; 5151 if (src_sel) 5152 *src_sel = cmd->src_sel; 5153 if (freq) 5154 *freq = le32_to_cpu(cmd->freq); 5155 if (src_freq) 5156 *src_freq = le32_to_cpu(cmd->src_freq); 5157 } 5158 5159 return ret; 5160 } 5161 5162 /** 5163 * ice_aq_get_cgu_dpll_status - get dpll status 5164 * @hw: pointer to the HW struct 5165 * @dpll_num: DPLL index 5166 * @ref_state: Reference clock state 5167 * @config: current DPLL config 5168 * @dpll_state: current DPLL state 5169 * @phase_offset: Phase offset in ns 5170 * @eec_mode: EEC_mode 5171 * 5172 * Get CGU DPLL status (0x0C66) 5173 * Return: 0 on success or negative value on failure. 
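 *
 * Illustrative sketch (unlike the pin-config getters above, every
 * out-parameter here must be supplied):
 *
 *	u8 ref_state, dpll_state, config, eec_mode;
 *	s64 phase_offset;
 *
 *	err = ice_aq_get_cgu_dpll_status(hw, 0, &ref_state, &dpll_state,
 *					 &config, &phase_offset, &eec_mode);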
5174 */ 5175 int 5176 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5177 u8 *dpll_state, u8 *config, s64 *phase_offset, 5178 u8 *eec_mode) 5179 { 5180 struct ice_aqc_get_cgu_dpll_status *cmd; 5181 struct ice_aq_desc desc; 5182 int status; 5183 5184 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5185 cmd = &desc.params.get_cgu_dpll_status; 5186 cmd->dpll_num = dpll_num; 5187 5188 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5189 if (!status) { 5190 *ref_state = cmd->ref_state; 5191 *dpll_state = cmd->dpll_state; 5192 *config = cmd->config; 5193 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5194 *phase_offset <<= 32; 5195 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5196 *phase_offset = sign_extend64(*phase_offset, 47); 5197 *eec_mode = cmd->eec_mode; 5198 } 5199 5200 return status; 5201 } 5202 5203 /** 5204 * ice_aq_set_cgu_dpll_config - set dpll config 5205 * @hw: pointer to the HW struct 5206 * @dpll_num: DPLL index 5207 * @ref_state: Reference clock state 5208 * @config: DPLL config 5209 * @eec_mode: EEC mode 5210 * 5211 * Set CGU DPLL config (0x0C67) 5212 * Return: 0 on success or negative value on failure. 5213 */ 5214 int 5215 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5216 u8 config, u8 eec_mode) 5217 { 5218 struct ice_aqc_set_cgu_dpll_config *cmd; 5219 struct ice_aq_desc desc; 5220 5221 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5222 cmd = &desc.params.set_cgu_dpll_config; 5223 cmd->dpll_num = dpll_num; 5224 cmd->ref_state = ref_state; 5225 cmd->config = config; 5226 cmd->eec_mode = eec_mode; 5227 5228 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5229 } 5230 5231 /** 5232 * ice_aq_set_cgu_ref_prio - set input reference priority 5233 * @hw: pointer to the HW struct 5234 * @dpll_num: DPLL index 5235 * @ref_idx: Reference pin index 5236 * @ref_priority: Reference input priority 5237 * 5238 * Set CGU reference priority (0x0C68) 5239 * Return: 0 on success or negative value on failure. 5240 */ 5241 int 5242 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5243 u8 ref_priority) 5244 { 5245 struct ice_aqc_set_cgu_ref_prio *cmd; 5246 struct ice_aq_desc desc; 5247 5248 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5249 cmd = &desc.params.set_cgu_ref_prio; 5250 cmd->dpll_num = dpll_num; 5251 cmd->ref_idx = ref_idx; 5252 cmd->ref_priority = ref_priority; 5253 5254 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5255 } 5256 5257 /** 5258 * ice_aq_get_cgu_ref_prio - get input reference priority 5259 * @hw: pointer to the HW struct 5260 * @dpll_num: DPLL index 5261 * @ref_idx: Reference pin index 5262 * @ref_prio: Reference input priority 5263 * 5264 * Get CGU reference priority (0x0C69) 5265 * Return: 0 on success or negative value on failure. 
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}

/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: PHY node handle
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
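 *
 * Illustrative sketch (@phy_output is both input and output; the remaining
 * out-parameters are optional):
 *
 *	u8 pin = 0, flags;
 *	u16 node_handle;
 *
 *	err = ice_aq_get_phy_rec_clk_out(hw, &pin, NULL, &flags,
 *					 &node_handle);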
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 *
 * Get sensor reading (0x0632)
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there are any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list, which
	 * is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
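 *
 * Illustrative post-reset sequence (a sketch; per ice_replay_vsi(), the main
 * VSI must be replayed before any other VSI):
 *
 *	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	if (!err)
 *		ice_replay_post(hw);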
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
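/*
 * Worked example (illustrative): the roll-over branch above. With a
 * previous 40-bit read of 0xFFFFFFFFF0 and a new read of 0x10, the
 * delta added to the software counter is
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, so the counter keeps
 * increasing monotonically across the hardware wrap. 'reg' stands in
 * for any 40-bit statistics register offset.
 *
 *	u64 prev = 0xFFFFFFFFF0ULL, cur = 0;
 *
 *	ice_stat_update40(hw, reg, true, &prev, &cur);
 */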
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read from an I2C device behind the link topology
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	if (!data)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c - write to an I2C device behind the link topology
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *	    bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
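/*
 * Usage sketch (illustrative only): read two bytes from a hypothetical
 * EEPROM at bus address 0x50 behind the link topology. The topology
 * address and I2C offset are made-up example values; FIELD_PREP() packs
 * the data size into bits [3:0] of 'params' as described above.
 *
 *	struct ice_aqc_link_topo_addr topo = {};
 *	u8 bytes[2] = {};
 *	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, sizeof(bytes));
 *	int err;
 *
 *	err = ice_aq_read_i2c(hw, topo, 0x50, cpu_to_le16(0), params,
 *			      bytes, NULL);
 */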
/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW-provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}
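/*
 * Usage sketch (illustrative only): drive a topology GPIO high and read
 * it back. 'gpio_handle' and the pin index are hypothetical example
 * values.
 *
 *	bool val;
 *	int err;
 *
 *	err = ice_aq_set_gpio(hw, gpio_handle, 2, true, NULL);
 *	if (!err)
 *		err = ice_aq_get_gpio(hw, gpio_handle, 2, &val, NULL);
 */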
/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given minimum version
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link FEC options */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
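/*
 * Usage sketch (illustrative only): 'caps' is assumed to have been
 * filled by a prior Get PHY Capabilities (0x0600) query; the helper
 * above then tells whether any autoneg variant is active.
 *
 *	if (ice_is_phy_caps_an_enabled(caps))
 *		ice_debug(hw, ICE_DBG_PHY, "autoneg enabled on this port\n");
 */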
/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB (0x0A08).
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
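/*
 * Usage sketch (illustrative only): gate the LLDP Rx filter on firmware
 * support before issuing the command. 'vsi_num' is a hypothetical
 * absolute VSI index.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
 *		int err = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 *
 *		if (err)
 *			ice_debug(hw, ICE_DBG_INIT, "LLDP filter add failed\n");
 *	}
 */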
/* Each index into the following array matches the speed of a return
 * value from the list of AQ returned speeds, i.e. the range
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_200GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this
 * array. The link_speed returned by the firmware is a 16 bit value, and
 * the array is indexed by [fls(speed) - 1].
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
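/*
 * Worked example (illustrative): ICE_AQ_LINK_SPEED_25GB is BIT(7), so
 * fls() of it returns 8 and ice_get_link_speed(8 - 1) yields
 * SPEED_25000. A caller converting the AQ link_speed field might do:
 *
 *	u16 aq_speed = hw->port_info->phy.link_info.link_speed;
 *	u32 mbps = ice_get_link_speed(fls(aq_speed) - 1);
 */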