// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
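/* For example (illustrative, assuming ICE_DBG_PHY logging is enabled and a
 * caller-chosen prefix of "caps"): a phy_type_low of
 * BIT_ULL(12) | BIT_ULL(14) == 0x5000 would be dumped as:
 *
 *	caps: phy_type_low: 0x0000000000005000
 *	caps: bit(12): 10GBASE_T
 *	caps: bit(14): 10GBASE_SR
 */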
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC_3K_E825;
		break;
	case ICE_DEV_ID_E830_BACKPLANE:
	case ICE_DEV_ID_E830_QSFP56:
	case ICE_DEV_ID_E830_SFP:
	case ICE_DEV_ID_E830_SFP_DD:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_generic_mac - check if device's mac_type is generic
 * @hw: pointer to the hardware structure
 *
 * Return: true if mac_type is generic (with SBQ support), false if not
 */
bool ice_is_generic_mac(struct ice_hw *hw)
{
	return (hw->mac_type == ICE_MAC_GENERIC ||
		hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}
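/* These family predicates gate family-specific code paths elsewhere in the
 * driver. An illustrative sketch (setup_e810t_extras() and
 * setup_e823_extras() are hypothetical helpers, not driver functions):
 *
 *	if (ice_is_e810t(hw))
 *		setup_e810t_extras(pf);
 *	else if (ice_is_e823(hw))
 *		setup_e823_extras(pf);
 */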
/**
 * ice_is_e823 - check if the device is E823 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_is_e825c - Check if a device is E825C family device
 * @hw: pointer to the hardware structure
 *
 * Return: true if the device is E825-C based, false if not.
 */
bool ice_is_e825c(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E825C_BACKPLANE:
	case ICE_DEV_ID_E825C_QSFP:
	case ICE_DEV_ID_E825C_SFP:
	case ICE_DEV_ID_E825C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
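/* ice_clear_pf_cfg() above shows the minimal pattern for a "direct" admin
 * queue command: fill a default descriptor with an opcode and send it with
 * no data buffer. Indirect commands differ only in passing a buffer and
 * its length; a rough sketch (hypothetical opcode and buffer):
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, some_indirect_opcode);
 *	status = ice_aq_send_cmd(hw, &desc, buf, buf_len, NULL);
 */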
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * The relevant MAC addresses are also stored in the HW struct (port.mac).
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node - get a node handle from the netlist
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node - find a node handle in the netlist
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}

/**
 * ice_is_media_cage_present - check if media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
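/* Worked example of the classification above: phy_type_low ==
 * ICE_PHY_TYPE_LOW_10GBASE_T maps straight to ICE_MEDIA_BASET, while
 * ICE_PHY_TYPE_LOW_100G_CAUI4 first checks the netlist for a media cage
 * (ICE_MEDIA_DA if one is present) and otherwise falls through to
 * ICE_MEDIA_BACKPLANE.
 */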
/**
 * ice_get_link_status_datalen - get link status data length
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for the newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC		E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR	E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
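/* Illustrative usage (mirroring the call made later in ice_init_hw()):
 * enabling jumbo frames at the MAC level is just a Set MAC Config with
 * the maximum supported frame size:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 *	if (status)
 *		return status;
 */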
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularities
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	/* __free pointers must start NULL so the cleanup handler is safe on
	 * the early-return paths below.
	 */
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
	void *mac_buf __free(kfree) = NULL;
	u16 mac_buf_len;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
			  GFP_KERNEL);
	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
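/* Illustrative pairing (a sketch, not code from this file): a PF setup
 * path brackets nominal operation with these two calls:
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;
 *	// ... nominal operation ...
 *	ice_deinit_hw(hw);
 *
 * ice_init_hw() unrolls its own partial initialization on failure, so
 * ice_deinit_hw() is only called after a successful ice_init_hw().
 */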
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		      u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
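/* Illustrative sketch (hypothetical ring values, mirroring how the driver's
 * Rx setup path uses this helper): fill the sparse context and let
 * ice_write_rxq_ctx() pack it and copy it to HW:
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;		// base in 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// data buffer in 128-byte units
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */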
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
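/* Illustrative sketch: reading one register over the sideband queue. For a
 * read (opcode 0, per the handling above) the data comes back in the
 * completion and ice_sbq_rw_reg() copies it into in->data. How the target
 * address splits between the low and high fields depends on the device:
 *
 *	struct ice_sbq_msg_input in = {};
 *
 *	in.dest_dev = dest;			// device on the sideband bus
 *	in.opcode = 0;				// 0 == read, non-zero == write
 *	in.msg_addr_low = lower_16_bits(addr);
 *	in.msg_addr_high = upper_16_bits(addr);
 *	err = ice_sbq_rw_reg(hw, &in);
 *	if (!err)
 *		val = in.data;
 */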
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all
	 * others must block until the package download completes and the
	 * Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver - get the firmware version
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver - send the driver version to firmware
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
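/* Illustrative sketch (placeholder version numbers): reporting the running
 * driver version once the AdminQ is up:
 *
 *	struct ice_driver_ver dv = {};
 *
 *	dv.major_ver = 1;
 *	dv.minor_ver = 0;
 *	strscpy(dv.driver_string, "example-driver", sizeof(dv.driver_string));
 *	err = ice_aq_send_driver_ver(hw, &dv, NULL);
 */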
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0 -         acquired lock, and can perform download package
 * 2) -EIO -      did not get lock, driver should fail to load
 * 3) -EALREADY - did not get lock, but another driver has
 *                successfully downloaded the package; the driver does
 *                not have to download the package and can continue
 *                loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
1791 */ 1792 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1793 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1794 *timeout = le32_to_cpu(cmd_resp->timeout); 1795 return 0; 1796 } else if (le16_to_cpu(cmd_resp->status) == 1797 ICE_AQ_RES_GLBL_IN_PROG) { 1798 *timeout = le32_to_cpu(cmd_resp->timeout); 1799 return -EIO; 1800 } else if (le16_to_cpu(cmd_resp->status) == 1801 ICE_AQ_RES_GLBL_DONE) { 1802 return -EALREADY; 1803 } 1804 1805 /* invalid FW response, force a timeout immediately */ 1806 *timeout = 0; 1807 return -EIO; 1808 } 1809 1810 /* If the resource is held by some other driver, the command completes 1811 * with a busy return value and the timeout field indicates the maximum 1812 * time the current owner of the resource has to free it. 1813 */ 1814 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1815 *timeout = le32_to_cpu(cmd_resp->timeout); 1816 1817 return status; 1818 } 1819 1820 /** 1821 * ice_aq_release_res 1822 * @hw: pointer to the HW struct 1823 * @res: resource ID 1824 * @sdp_number: resource number 1825 * @cd: pointer to command details structure or NULL 1826 * 1827 * release common resource using the admin queue commands (0x0009) 1828 */ 1829 static int 1830 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1831 struct ice_sq_cd *cd) 1832 { 1833 struct ice_aqc_req_res *cmd; 1834 struct ice_aq_desc desc; 1835 1836 cmd = &desc.params.res_owner; 1837 1838 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1839 1840 cmd->res_id = cpu_to_le16(res); 1841 cmd->res_number = cpu_to_le32(sdp_number); 1842 1843 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1844 } 1845 1846 /** 1847 * ice_acquire_res 1848 * @hw: pointer to the HW structure 1849 * @res: resource ID 1850 * @access: access type (read or write) 1851 * @timeout: timeout in milliseconds 1852 * 1853 * This function will attempt to acquire the ownership of a resource. 1854 */ 1855 int 1856 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1857 enum ice_aq_res_access_type access, u32 timeout) 1858 { 1859 #define ICE_RES_POLLING_DELAY_MS 10 1860 u32 delay = ICE_RES_POLLING_DELAY_MS; 1861 u32 time_left = timeout; 1862 int status; 1863 1864 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1865 1866 /* A return code of -EALREADY means that another driver has 1867 * previously acquired the resource and performed any necessary updates; 1868 * in this case the caller does not obtain the resource and has no 1869 * further work to do. 1870 */ 1871 if (status == -EALREADY) 1872 goto ice_acquire_res_exit; 1873 1874 if (status) 1875 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 1876 1877 /* If necessary, poll until the current lock owner timeouts */ 1878 timeout = time_left; 1879 while (status && timeout && time_left) { 1880 mdelay(delay); 1881 timeout = (timeout > delay) ? 
timeout - delay : 0; 1882 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1883 1884 if (status == -EALREADY) 1885 /* lock free, but no work to do */ 1886 break; 1887 1888 if (!status) 1889 /* lock acquired */ 1890 break; 1891 } 1892 if (status && status != -EALREADY) 1893 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1894 1895 ice_acquire_res_exit: 1896 if (status == -EALREADY) { 1897 if (access == ICE_RES_WRITE) 1898 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1899 else 1900 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1901 } 1902 return status; 1903 } 1904 1905 /** 1906 * ice_release_res 1907 * @hw: pointer to the HW structure 1908 * @res: resource ID 1909 * 1910 * This function will release a resource using the proper Admin Command. 1911 */ 1912 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1913 { 1914 unsigned long timeout; 1915 int status; 1916 1917 /* there are some rare cases when trying to release the resource 1918 * results in an admin queue timeout, so handle them correctly 1919 */ 1920 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1921 do { 1922 status = ice_aq_release_res(hw, res, 0, NULL); 1923 if (status != -EIO) 1924 break; 1925 usleep_range(1000, 2000); 1926 } while (time_before(jiffies, timeout)); 1927 } 1928 1929 /** 1930 * ice_aq_alloc_free_res - command to allocate/free resources 1931 * @hw: pointer to the HW struct 1932 * @buf: Indirect buffer to hold data parameters and response 1933 * @buf_size: size of buffer for indirect commands 1934 * @opc: pass in the command opcode 1935 * 1936 * Helper function to allocate/free resources using the admin queue commands 1937 */ 1938 int ice_aq_alloc_free_res(struct ice_hw *hw, 1939 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1940 enum ice_adminq_opc opc) 1941 { 1942 struct ice_aqc_alloc_free_res_cmd *cmd; 1943 struct ice_aq_desc desc; 1944 1945 cmd = &desc.params.sw_res_ctrl; 1946 1947 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 1948 return -EINVAL; 1949 1950 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1951 1952 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1953 1954 cmd->num_entries = cpu_to_le16(1); 1955 1956 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 1957 } 1958 1959 /** 1960 * ice_alloc_hw_res - allocate resource 1961 * @hw: pointer to the HW struct 1962 * @type: type of resource 1963 * @num: number of resources to allocate 1964 * @btm: allocate from bottom 1965 * @res: pointer to array that will receive the resources 1966 */ 1967 int 1968 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 1969 { 1970 struct ice_aqc_alloc_free_res_elem *buf; 1971 u16 buf_len; 1972 int status; 1973 1974 buf_len = struct_size(buf, elem, num); 1975 buf = kzalloc(buf_len, GFP_KERNEL); 1976 if (!buf) 1977 return -ENOMEM; 1978 1979 /* Prepare buffer to allocate resource. 
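 * Only the element count and the resource type (including the dedicated,
 * ignore-index and optional scan-bottom flags set below) are needed for an
 * allocate request; firmware returns the allocated IDs in buf->elem.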
*/ 1980 buf->num_elems = cpu_to_le16(num); 1981 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1982 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1983 if (btm) 1984 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1985 1986 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 1987 if (status) 1988 goto ice_alloc_res_exit; 1989 1990 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 1991 1992 ice_alloc_res_exit: 1993 kfree(buf); 1994 return status; 1995 } 1996 1997 /** 1998 * ice_free_hw_res - free allocated HW resource 1999 * @hw: pointer to the HW struct 2000 * @type: type of resource to free 2001 * @num: number of resources 2002 * @res: pointer to array that contains the resources to free 2003 */ 2004 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2005 { 2006 struct ice_aqc_alloc_free_res_elem *buf; 2007 u16 buf_len; 2008 int status; 2009 2010 buf_len = struct_size(buf, elem, num); 2011 buf = kzalloc(buf_len, GFP_KERNEL); 2012 if (!buf) 2013 return -ENOMEM; 2014 2015 /* Prepare buffer to free resource. */ 2016 buf->num_elems = cpu_to_le16(num); 2017 buf->res_type = cpu_to_le16(type); 2018 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2019 2020 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2021 if (status) 2022 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2023 2024 kfree(buf); 2025 return status; 2026 } 2027 2028 /** 2029 * ice_get_num_per_func - determine number of resources per PF 2030 * @hw: pointer to the HW structure 2031 * @max: value to be evenly split between each PF 2032 * 2033 * Determine the number of valid functions by going through the bitmap returned 2034 * from parsing capabilities and use this to calculate the number of resources 2035 * per PF based on the max value passed in. 2036 */ 2037 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2038 { 2039 u8 funcs; 2040 2041 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2042 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2043 ICE_CAPS_VALID_FUNCS_M); 2044 2045 if (!funcs) 2046 return 0; 2047 2048 return max / funcs; 2049 } 2050 2051 /** 2052 * ice_parse_common_caps - parse common device/function capabilities 2053 * @hw: pointer to the HW struct 2054 * @caps: pointer to common capabilities structure 2055 * @elem: the capability element to parse 2056 * @prefix: message prefix for tracing capabilities 2057 * 2058 * Given a capability element, extract relevant details into the common 2059 * capability structure. 2060 * 2061 * Returns: true if the capability matches one of the common capability ids, 2062 * false otherwise. 
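 *
 * Illustrative call (sketch), mirroring how the function/device parsers
 * below invoke it for each element of the AQ response buffer:
 *
 *        found = ice_parse_common_caps(hw, &func_p->common_cap,
 *                                      &cap_resp[i], "func caps");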
2063 */ 2064 static bool 2065 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2066 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2067 { 2068 u32 logical_id = le32_to_cpu(elem->logical_id); 2069 u32 phys_id = le32_to_cpu(elem->phys_id); 2070 u32 number = le32_to_cpu(elem->number); 2071 u16 cap = le16_to_cpu(elem->cap); 2072 bool found = true; 2073 2074 switch (cap) { 2075 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2076 caps->valid_functions = number; 2077 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2078 caps->valid_functions); 2079 break; 2080 case ICE_AQC_CAPS_SRIOV: 2081 caps->sr_iov_1_1 = (number == 1); 2082 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2083 caps->sr_iov_1_1); 2084 break; 2085 case ICE_AQC_CAPS_DCB: 2086 caps->dcb = (number == 1); 2087 caps->active_tc_bitmap = logical_id; 2088 caps->maxtc = phys_id; 2089 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2090 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2091 caps->active_tc_bitmap); 2092 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2093 break; 2094 case ICE_AQC_CAPS_RSS: 2095 caps->rss_table_size = number; 2096 caps->rss_table_entry_width = logical_id; 2097 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2098 caps->rss_table_size); 2099 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2100 caps->rss_table_entry_width); 2101 break; 2102 case ICE_AQC_CAPS_RXQS: 2103 caps->num_rxq = number; 2104 caps->rxq_first_id = phys_id; 2105 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2106 caps->num_rxq); 2107 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2108 caps->rxq_first_id); 2109 break; 2110 case ICE_AQC_CAPS_TXQS: 2111 caps->num_txq = number; 2112 caps->txq_first_id = phys_id; 2113 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2114 caps->num_txq); 2115 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2116 caps->txq_first_id); 2117 break; 2118 case ICE_AQC_CAPS_MSIX: 2119 caps->num_msix_vectors = number; 2120 caps->msix_vector_first_id = phys_id; 2121 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2122 caps->num_msix_vectors); 2123 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2124 caps->msix_vector_first_id); 2125 break; 2126 case ICE_AQC_CAPS_PENDING_NVM_VER: 2127 caps->nvm_update_pending_nvm = true; 2128 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2129 break; 2130 case ICE_AQC_CAPS_PENDING_OROM_VER: 2131 caps->nvm_update_pending_orom = true; 2132 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2133 break; 2134 case ICE_AQC_CAPS_PENDING_NET_VER: 2135 caps->nvm_update_pending_netlist = true; 2136 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2137 break; 2138 case ICE_AQC_CAPS_NVM_MGMT: 2139 caps->nvm_unified_update = 2140 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2141 true : false; 2142 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2143 caps->nvm_unified_update); 2144 break; 2145 case ICE_AQC_CAPS_RDMA: 2146 caps->rdma = (number == 1); 2147 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2148 break; 2149 case ICE_AQC_CAPS_MAX_MTU: 2150 caps->max_mtu = number; 2151 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2152 prefix, caps->max_mtu); 2153 break; 2154 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2155 caps->pcie_reset_avoidance = (number > 0); 2156 ice_debug(hw, ICE_DBG_INIT, 2157 "%s: pcie_reset_avoidance = %d\n", prefix, 2158 caps->pcie_reset_avoidance); 2159 break; 2160 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2161 caps->reset_restrict_support = (number == 1); 2162 ice_debug(hw, ICE_DBG_INIT, 2163 "%s: reset_restrict_support = %d\n", prefix, 2164 caps->reset_restrict_support); 2165 break; 2166 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2167 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2168 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2169 prefix, caps->roce_lag); 2170 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2171 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2172 prefix, caps->sriov_lag); 2173 break; 2174 default: 2175 /* Not one of the recognized common capabilities */ 2176 found = false; 2177 } 2178 2179 return found; 2180 } 2181 2182 /** 2183 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2184 * @hw: pointer to the HW structure 2185 * @caps: pointer to capabilities structure to fix 2186 * 2187 * Re-calculate the capabilities that are dependent on the number of physical 2188 * ports; i.e. some features are not supported or function differently on 2189 * devices with more than 4 ports. 2190 */ 2191 static void 2192 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2193 { 2194 /* This assumes device capabilities are always scanned before function 2195 * capabilities during the initialization flow. 2196 */ 2197 if (hw->dev_caps.num_funcs > 4) { 2198 /* Max 4 TCs per port */ 2199 caps->maxtc = 4; 2200 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2201 caps->maxtc); 2202 if (caps->rdma) { 2203 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2204 caps->rdma = 0; 2205 } 2206 2207 /* print message only when processing device capabilities 2208 * during initialization. 2209 */ 2210 if (caps == &hw->dev_caps.common_cap) 2211 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2212 } 2213 } 2214 2215 /** 2216 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2217 * @hw: pointer to the HW struct 2218 * @func_p: pointer to function capabilities structure 2219 * @cap: pointer to the capability element to parse 2220 * 2221 * Extract function capabilities for ICE_AQC_CAPS_VF. 
2222 */ 2223 static void 2224 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2225 struct ice_aqc_list_caps_elem *cap) 2226 { 2227 u32 logical_id = le32_to_cpu(cap->logical_id); 2228 u32 number = le32_to_cpu(cap->number); 2229 2230 func_p->num_allocd_vfs = number; 2231 func_p->vf_base_id = logical_id; 2232 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2233 func_p->num_allocd_vfs); 2234 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2235 func_p->vf_base_id); 2236 } 2237 2238 /** 2239 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2240 * @hw: pointer to the HW struct 2241 * @func_p: pointer to function capabilities structure 2242 * @cap: pointer to the capability element to parse 2243 * 2244 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2245 */ 2246 static void 2247 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2248 struct ice_aqc_list_caps_elem *cap) 2249 { 2250 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2251 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2252 le32_to_cpu(cap->number)); 2253 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2254 func_p->guar_num_vsi); 2255 } 2256 2257 /** 2258 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2259 * @hw: pointer to the HW struct 2260 * @func_p: pointer to function capabilities structure 2261 * @cap: pointer to the capability element to parse 2262 * 2263 * Extract function capabilities for ICE_AQC_CAPS_1588. 2264 */ 2265 static void 2266 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2267 struct ice_aqc_list_caps_elem *cap) 2268 { 2269 struct ice_ts_func_info *info = &func_p->ts_func_info; 2270 u32 number = le32_to_cpu(cap->number); 2271 2272 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2273 func_p->common_cap.ieee_1588 = info->ena; 2274 2275 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2276 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2277 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2278 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2279 2280 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2281 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2282 2283 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2284 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2285 } else { 2286 /* Unknown clock frequency, so assume a (probably incorrect) 2287 * default to avoid out-of-bounds lookups of frequency-related 2288 * information.
2289 */ 2290 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2291 info->clk_freq); 2292 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2293 } 2294 2295 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2296 func_p->common_cap.ieee_1588); 2297 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2298 info->src_tmr_owned); 2299 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2300 info->tmr_ena); 2301 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2302 info->tmr_index_owned); 2303 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2304 info->tmr_index_assoc); 2305 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2306 info->clk_freq); 2307 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2308 info->clk_src); 2309 } 2310 2311 /** 2312 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2313 * @hw: pointer to the HW struct 2314 * @func_p: pointer to function capabilities structure 2315 * 2316 * Extract function capabilities for ICE_AQC_CAPS_FD. 2317 */ 2318 static void 2319 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2320 { 2321 u32 reg_val, gsize, bsize; 2322 2323 reg_val = rd32(hw, GLQF_FD_SIZE); 2324 switch (hw->mac_type) { 2325 case ICE_MAC_E830: 2326 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2327 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2328 break; 2329 case ICE_MAC_E810: 2330 default: 2331 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2332 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2333 } 2334 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2335 func_p->fd_fltr_best_effort = bsize; 2336 2337 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2338 func_p->fd_fltr_guar); 2339 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2340 func_p->fd_fltr_best_effort); 2341 } 2342 2343 /** 2344 * ice_parse_func_caps - Parse function capabilities 2345 * @hw: pointer to the HW struct 2346 * @func_p: pointer to function capabilities structure 2347 * @buf: buffer containing the function capability records 2348 * @cap_count: the number of capabilities 2349 * 2350 * Helper function to parse function (0x000A) capabilities list. For 2351 * capabilities shared between device and function, this relies on 2352 * ice_parse_common_caps. 2353 * 2354 * Loop through the list of provided capabilities and extract the relevant 2355 * data into the function capabilities structured. 
2356 */ 2357 static void 2358 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2359 void *buf, u32 cap_count) 2360 { 2361 struct ice_aqc_list_caps_elem *cap_resp; 2362 u32 i; 2363 2364 cap_resp = buf; 2365 2366 memset(func_p, 0, sizeof(*func_p)); 2367 2368 for (i = 0; i < cap_count; i++) { 2369 u16 cap = le16_to_cpu(cap_resp[i].cap); 2370 bool found; 2371 2372 found = ice_parse_common_caps(hw, &func_p->common_cap, 2373 &cap_resp[i], "func caps"); 2374 2375 switch (cap) { 2376 case ICE_AQC_CAPS_VF: 2377 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2378 break; 2379 case ICE_AQC_CAPS_VSI: 2380 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2381 break; 2382 case ICE_AQC_CAPS_1588: 2383 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2384 break; 2385 case ICE_AQC_CAPS_FD: 2386 ice_parse_fdir_func_caps(hw, func_p); 2387 break; 2388 default: 2389 /* Don't list common capabilities as unknown */ 2390 if (!found) 2391 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2392 i, cap); 2393 break; 2394 } 2395 } 2396 2397 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2398 } 2399 2400 /** 2401 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2402 * @hw: pointer to the HW struct 2403 * @dev_p: pointer to device capabilities structure 2404 * @cap: capability element to parse 2405 * 2406 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2407 */ 2408 static void 2409 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2410 struct ice_aqc_list_caps_elem *cap) 2411 { 2412 u32 number = le32_to_cpu(cap->number); 2413 2414 dev_p->num_funcs = hweight32(number); 2415 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2416 dev_p->num_funcs); 2417 } 2418 2419 /** 2420 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2421 * @hw: pointer to the HW struct 2422 * @dev_p: pointer to device capabilities structure 2423 * @cap: capability element to parse 2424 * 2425 * Parse ICE_AQC_CAPS_VF for device capabilities. 2426 */ 2427 static void 2428 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2429 struct ice_aqc_list_caps_elem *cap) 2430 { 2431 u32 number = le32_to_cpu(cap->number); 2432 2433 dev_p->num_vfs_exposed = number; 2434 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2435 dev_p->num_vfs_exposed); 2436 } 2437 2438 /** 2439 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2440 * @hw: pointer to the HW struct 2441 * @dev_p: pointer to device capabilities structure 2442 * @cap: capability element to parse 2443 * 2444 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2445 */ 2446 static void 2447 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2448 struct ice_aqc_list_caps_elem *cap) 2449 { 2450 u32 number = le32_to_cpu(cap->number); 2451 2452 dev_p->num_vsi_allocd_to_host = number; 2453 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2454 dev_p->num_vsi_allocd_to_host); 2455 } 2456 2457 /** 2458 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2459 * @hw: pointer to the HW struct 2460 * @dev_p: pointer to device capabilities structure 2461 * @cap: capability element to parse 2462 * 2463 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
2464 */ 2465 static void 2466 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2467 struct ice_aqc_list_caps_elem *cap) 2468 { 2469 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2470 u32 logical_id = le32_to_cpu(cap->logical_id); 2471 u32 phys_id = le32_to_cpu(cap->phys_id); 2472 u32 number = le32_to_cpu(cap->number); 2473 2474 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2475 dev_p->common_cap.ieee_1588 = info->ena; 2476 2477 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2478 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2479 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2480 2481 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2482 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2483 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2484 2485 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2486 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2487 2488 info->ena_ports = logical_id; 2489 info->tmr_own_map = phys_id; 2490 2491 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2492 dev_p->common_cap.ieee_1588); 2493 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2494 info->tmr0_owner); 2495 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2496 info->tmr0_owned); 2497 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2498 info->tmr0_ena); 2499 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2500 info->tmr1_owner); 2501 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2502 info->tmr1_owned); 2503 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2504 info->tmr1_ena); 2505 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2506 info->ts_ll_read); 2507 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2508 info->ts_ll_int_read); 2509 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2510 info->ena_ports); 2511 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2512 info->tmr_own_map); 2513 } 2514 2515 /** 2516 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2517 * @hw: pointer to the HW struct 2518 * @dev_p: pointer to device capabilities structure 2519 * @cap: capability element to parse 2520 * 2521 * Parse ICE_AQC_CAPS_FD for device capabilities. 2522 */ 2523 static void 2524 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2525 struct ice_aqc_list_caps_elem *cap) 2526 { 2527 u32 number = le32_to_cpu(cap->number); 2528 2529 dev_p->num_flow_director_fltr = number; 2530 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2531 dev_p->num_flow_director_fltr); 2532 } 2533 2534 /** 2535 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2536 * @hw: pointer to the HW struct 2537 * @dev_p: pointer to device capabilities structure 2538 * @cap: capability element to parse 2539 * 2540 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2541 * enabled sensors. 
2542 */ 2543 static void 2544 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2545 struct ice_aqc_list_caps_elem *cap) 2546 { 2547 dev_p->supported_sensors = le32_to_cpu(cap->number); 2548 2549 ice_debug(hw, ICE_DBG_INIT, 2550 "dev caps: supported sensors (bitmap) = 0x%x\n", 2551 dev_p->supported_sensors); 2552 } 2553 2554 /** 2555 * ice_parse_dev_caps - Parse device capabilities 2556 * @hw: pointer to the HW struct 2557 * @dev_p: pointer to device capabilities structure 2558 * @buf: buffer containing the device capability records 2559 * @cap_count: the number of capabilities 2560 * 2561 * Helper device to parse device (0x000B) capabilities list. For 2562 * capabilities shared between device and function, this relies on 2563 * ice_parse_common_caps. 2564 * 2565 * Loop through the list of provided capabilities and extract the relevant 2566 * data into the device capabilities structured. 2567 */ 2568 static void 2569 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2570 void *buf, u32 cap_count) 2571 { 2572 struct ice_aqc_list_caps_elem *cap_resp; 2573 u32 i; 2574 2575 cap_resp = buf; 2576 2577 memset(dev_p, 0, sizeof(*dev_p)); 2578 2579 for (i = 0; i < cap_count; i++) { 2580 u16 cap = le16_to_cpu(cap_resp[i].cap); 2581 bool found; 2582 2583 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2584 &cap_resp[i], "dev caps"); 2585 2586 switch (cap) { 2587 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2588 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2589 break; 2590 case ICE_AQC_CAPS_VF: 2591 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2592 break; 2593 case ICE_AQC_CAPS_VSI: 2594 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2595 break; 2596 case ICE_AQC_CAPS_1588: 2597 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2598 break; 2599 case ICE_AQC_CAPS_FD: 2600 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2601 break; 2602 case ICE_AQC_CAPS_SENSOR_READING: 2603 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2604 break; 2605 default: 2606 /* Don't list common capabilities as unknown */ 2607 if (!found) 2608 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2609 i, cap); 2610 break; 2611 } 2612 } 2613 2614 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2615 } 2616 2617 /** 2618 * ice_is_pf_c827 - check if pf contains c827 phy 2619 * @hw: pointer to the hw struct 2620 */ 2621 bool ice_is_pf_c827(struct ice_hw *hw) 2622 { 2623 struct ice_aqc_get_link_topo cmd = {}; 2624 u8 node_part_number; 2625 u16 node_handle; 2626 int status; 2627 2628 if (hw->mac_type != ICE_MAC_E810) 2629 return false; 2630 2631 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2632 return true; 2633 2634 cmd.addr.topo_params.node_type_ctx = 2635 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2636 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2637 cmd.addr.topo_params.index = 0; 2638 2639 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2640 &node_handle); 2641 2642 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2643 return false; 2644 2645 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2646 return true; 2647 2648 return false; 2649 } 2650 2651 /** 2652 * ice_is_phy_rclk_in_netlist 2653 * @hw: pointer to the hw struct 2654 * 2655 * Check if the PHY Recovered Clock device is present in the netlist 2656 */ 2657 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2658 { 2659 if 
(ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2660 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2661 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2662 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2663 return false; 2664 2665 return true; 2666 } 2667 2668 /** 2669 * ice_is_clock_mux_in_netlist 2670 * @hw: pointer to the hw struct 2671 * 2672 * Check if the Clock Multiplexer device is present in the netlist 2673 */ 2674 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2675 { 2676 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2677 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2678 NULL)) 2679 return false; 2680 2681 return true; 2682 } 2683 2684 /** 2685 * ice_is_cgu_in_netlist - check for CGU presence 2686 * @hw: pointer to the hw struct 2687 * 2688 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2689 * Save the CGU part number in the hw structure for later use. 2690 * Return: 2691 * * true - cgu is present 2692 * * false - cgu is not present 2693 */ 2694 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2695 { 2696 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2697 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2698 NULL)) { 2699 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2700 return true; 2701 } else if (!ice_find_netlist_node(hw, 2702 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2703 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2704 NULL)) { 2705 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2706 return true; 2707 } 2708 2709 return false; 2710 } 2711 2712 /** 2713 * ice_is_gps_in_netlist 2714 * @hw: pointer to the hw struct 2715 * 2716 * Check if the GPS generic device is present in the netlist 2717 */ 2718 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2719 { 2720 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2721 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2722 return false; 2723 2724 return true; 2725 } 2726 2727 /** 2728 * ice_aq_list_caps - query function/device capabilities 2729 * @hw: pointer to the HW struct 2730 * @buf: a buffer to hold the capabilities 2731 * @buf_size: size of the buffer 2732 * @cap_count: if not NULL, set to the number of capabilities reported 2733 * @opc: capabilities type to discover, device or function 2734 * @cd: pointer to command details structure or NULL 2735 * 2736 * Get the function (0x000A) or device (0x000B) capabilities description from 2737 * firmware and store it in the buffer. 2738 * 2739 * If the cap_count pointer is not NULL, then it is set to the number of 2740 * capabilities firmware will report. Note that if the buffer size is too 2741 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2742 * cap_count will still be updated in this case. It is recommended that the 2743 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2744 * firmware could return) to avoid this. 
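 *
 * Typical usage (sketch), mirroring ice_discover_dev_caps() below:
 *
 *        cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *        if (cbuf)
 *                status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN,
 *                                          &cap_count,
 *                                          ice_aqc_opc_list_dev_caps, NULL);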
2745 */ 2746 int 2747 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2748 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2749 { 2750 struct ice_aqc_list_caps *cmd; 2751 struct ice_aq_desc desc; 2752 int status; 2753 2754 cmd = &desc.params.get_cap; 2755 2756 if (opc != ice_aqc_opc_list_func_caps && 2757 opc != ice_aqc_opc_list_dev_caps) 2758 return -EINVAL; 2759 2760 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2761 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2762 2763 if (cap_count) 2764 *cap_count = le32_to_cpu(cmd->count); 2765 2766 return status; 2767 } 2768 2769 /** 2770 * ice_discover_dev_caps - Read and extract device capabilities 2771 * @hw: pointer to the hardware structure 2772 * @dev_caps: pointer to device capabilities structure 2773 * 2774 * Read the device capabilities and extract them into the dev_caps structure 2775 * for later use. 2776 */ 2777 int 2778 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2779 { 2780 u32 cap_count = 0; 2781 void *cbuf; 2782 int status; 2783 2784 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2785 if (!cbuf) 2786 return -ENOMEM; 2787 2788 /* Although the driver doesn't know the number of capabilities the 2789 * device will return, we can simply send a 4KB buffer, the maximum 2790 * possible size that firmware can return. 2791 */ 2792 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2793 2794 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2795 ice_aqc_opc_list_dev_caps, NULL); 2796 if (!status) 2797 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2798 kfree(cbuf); 2799 2800 return status; 2801 } 2802 2803 /** 2804 * ice_discover_func_caps - Read and extract function capabilities 2805 * @hw: pointer to the hardware structure 2806 * @func_caps: pointer to function capabilities structure 2807 * 2808 * Read the function capabilities and extract them into the func_caps structure 2809 * for later use. 2810 */ 2811 static int 2812 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2813 { 2814 u32 cap_count = 0; 2815 void *cbuf; 2816 int status; 2817 2818 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2819 if (!cbuf) 2820 return -ENOMEM; 2821 2822 /* Although the driver doesn't know the number of capabilities the 2823 * device will return, we can simply send a 4KB buffer, the maximum 2824 * possible size that firmware can return. 
2825 */ 2826 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2827 2828 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2829 ice_aqc_opc_list_func_caps, NULL); 2830 if (!status) 2831 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2832 kfree(cbuf); 2833 2834 return status; 2835 } 2836 2837 /** 2838 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2839 * @hw: pointer to the hardware structure 2840 */ 2841 void ice_set_safe_mode_caps(struct ice_hw *hw) 2842 { 2843 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2844 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2845 struct ice_hw_common_caps cached_caps; 2846 u32 num_funcs; 2847 2848 /* cache some func_caps values that should be restored after memset */ 2849 cached_caps = func_caps->common_cap; 2850 2851 /* unset func capabilities */ 2852 memset(func_caps, 0, sizeof(*func_caps)); 2853 2854 #define ICE_RESTORE_FUNC_CAP(name) \ 2855 func_caps->common_cap.name = cached_caps.name 2856 2857 /* restore cached values */ 2858 ICE_RESTORE_FUNC_CAP(valid_functions); 2859 ICE_RESTORE_FUNC_CAP(txq_first_id); 2860 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2861 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2862 ICE_RESTORE_FUNC_CAP(max_mtu); 2863 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2864 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2865 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2866 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2867 2868 /* one Tx and one Rx queue in safe mode */ 2869 func_caps->common_cap.num_rxq = 1; 2870 func_caps->common_cap.num_txq = 1; 2871 2872 /* two MSIX vectors, one for traffic and one for misc causes */ 2873 func_caps->common_cap.num_msix_vectors = 2; 2874 func_caps->guar_num_vsi = 1; 2875 2876 /* cache some dev_caps values that should be restored after memset */ 2877 cached_caps = dev_caps->common_cap; 2878 num_funcs = dev_caps->num_funcs; 2879 2880 /* unset dev capabilities */ 2881 memset(dev_caps, 0, sizeof(*dev_caps)); 2882 2883 #define ICE_RESTORE_DEV_CAP(name) \ 2884 dev_caps->common_cap.name = cached_caps.name 2885 2886 /* restore cached values */ 2887 ICE_RESTORE_DEV_CAP(valid_functions); 2888 ICE_RESTORE_DEV_CAP(txq_first_id); 2889 ICE_RESTORE_DEV_CAP(rxq_first_id); 2890 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2891 ICE_RESTORE_DEV_CAP(max_mtu); 2892 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2893 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2894 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2895 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2896 dev_caps->num_funcs = num_funcs; 2897 2898 /* one Tx and one Rx queue per function in safe mode */ 2899 dev_caps->common_cap.num_rxq = num_funcs; 2900 dev_caps->common_cap.num_txq = num_funcs; 2901 2902 /* two MSIX vectors per function */ 2903 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2904 } 2905 2906 /** 2907 * ice_get_caps - get info about the HW 2908 * @hw: pointer to the hardware structure 2909 */ 2910 int ice_get_caps(struct ice_hw *hw) 2911 { 2912 int status; 2913 2914 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2915 if (status) 2916 return status; 2917 2918 return ice_discover_func_caps(hw, &hw->func_caps); 2919 } 2920 2921 /** 2922 * ice_aq_manage_mac_write - manage MAC address write command 2923 * @hw: pointer to the HW struct 2924 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2925 * @flags: flags to control write behavior 2926 * @cd: pointer to command details structure or NULL 2927 * 2928 * This function is used to write MAC address 
to the NVM (0x0108). 2929 */ 2930 int 2931 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2932 struct ice_sq_cd *cd) 2933 { 2934 struct ice_aqc_manage_mac_write *cmd; 2935 struct ice_aq_desc desc; 2936 2937 cmd = &desc.params.mac_write; 2938 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2939 2940 cmd->flags = flags; 2941 ether_addr_copy(cmd->mac_addr, mac_addr); 2942 2943 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2944 } 2945 2946 /** 2947 * ice_aq_clear_pxe_mode 2948 * @hw: pointer to the HW struct 2949 * 2950 * Tell the firmware that the driver is taking over from PXE (0x0110). 2951 */ 2952 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2953 { 2954 struct ice_aq_desc desc; 2955 2956 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2957 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2958 2959 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2960 } 2961 2962 /** 2963 * ice_clear_pxe_mode - clear pxe operations mode 2964 * @hw: pointer to the HW struct 2965 * 2966 * Make sure all PXE mode settings are cleared, including things 2967 * like descriptor fetch/write-back mode. 2968 */ 2969 void ice_clear_pxe_mode(struct ice_hw *hw) 2970 { 2971 if (ice_check_sq_alive(hw, &hw->adminq)) 2972 ice_aq_clear_pxe_mode(hw); 2973 } 2974 2975 /** 2976 * ice_aq_set_port_params - set physical port parameters. 2977 * @pi: pointer to the port info struct 2978 * @double_vlan: if set double VLAN is enabled 2979 * @cd: pointer to command details structure or NULL 2980 * 2981 * Set Physical port parameters (0x0203) 2982 */ 2983 int 2984 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2985 struct ice_sq_cd *cd) 2986 2987 { 2988 struct ice_aqc_set_port_params *cmd; 2989 struct ice_hw *hw = pi->hw; 2990 struct ice_aq_desc desc; 2991 u16 cmd_flags = 0; 2992 2993 cmd = &desc.params.set_port_params; 2994 2995 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 2996 if (double_vlan) 2997 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 2998 cmd->cmd_flags = cpu_to_le16(cmd_flags); 2999 3000 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3001 } 3002 3003 /** 3004 * ice_is_100m_speed_supported 3005 * @hw: pointer to the HW struct 3006 * 3007 * returns true if 100M speeds are supported by the device, 3008 * false otherwise. 3009 */ 3010 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3011 { 3012 switch (hw->device_id) { 3013 case ICE_DEV_ID_E822C_SGMII: 3014 case ICE_DEV_ID_E822L_SGMII: 3015 case ICE_DEV_ID_E823L_1GBE: 3016 case ICE_DEV_ID_E823C_SGMII: 3017 return true; 3018 default: 3019 return false; 3020 } 3021 } 3022 3023 /** 3024 * ice_get_link_speed_based_on_phy_type - returns link speed 3025 * @phy_type_low: lower part of phy_type 3026 * @phy_type_high: higher part of phy_type 3027 * 3028 * This helper function will convert an entry in PHY type structure 3029 * [phy_type_low, phy_type_high] to its corresponding link speed. 3030 * Note: In the structure of [phy_type_low, phy_type_high], there should 3031 * be one bit set, as this function will convert one PHY type to its 3032 * speed. 
3033 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3034 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3035 */ 3036 static u16 3037 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3038 { 3039 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3040 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3041 3042 switch (phy_type_low) { 3043 case ICE_PHY_TYPE_LOW_100BASE_TX: 3044 case ICE_PHY_TYPE_LOW_100M_SGMII: 3045 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3046 break; 3047 case ICE_PHY_TYPE_LOW_1000BASE_T: 3048 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3049 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3050 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3051 case ICE_PHY_TYPE_LOW_1G_SGMII: 3052 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3053 break; 3054 case ICE_PHY_TYPE_LOW_2500BASE_T: 3055 case ICE_PHY_TYPE_LOW_2500BASE_X: 3056 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3057 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3058 break; 3059 case ICE_PHY_TYPE_LOW_5GBASE_T: 3060 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3061 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3062 break; 3063 case ICE_PHY_TYPE_LOW_10GBASE_T: 3064 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3065 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3066 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3067 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3068 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3069 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3070 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3071 break; 3072 case ICE_PHY_TYPE_LOW_25GBASE_T: 3073 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3074 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3075 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3076 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3077 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3078 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3079 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3080 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3081 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3082 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3083 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3084 break; 3085 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3086 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3087 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3088 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3089 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3090 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3091 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3092 break; 3093 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3094 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3095 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3096 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3097 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3098 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3099 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3100 case ICE_PHY_TYPE_LOW_50G_AUI2: 3101 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3102 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3103 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3104 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3105 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3106 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3107 case ICE_PHY_TYPE_LOW_50G_AUI1: 3108 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3109 break; 3110 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3111 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3112 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3113 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3114 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3115 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3116 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3117 case ICE_PHY_TYPE_LOW_100G_AUI4: 3118 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3119 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3120 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3121 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3122 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3123 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3124 
break; 3125 default: 3126 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3127 break; 3128 } 3129 3130 switch (phy_type_high) { 3131 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 3132 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 3133 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 3134 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 3135 case ICE_PHY_TYPE_HIGH_100G_AUI2: 3136 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 3137 break; 3138 default: 3139 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3140 break; 3141 } 3142 3143 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 3144 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3145 return ICE_AQ_LINK_SPEED_UNKNOWN; 3146 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3147 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 3148 return ICE_AQ_LINK_SPEED_UNKNOWN; 3149 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3150 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3151 return speed_phy_type_low; 3152 else 3153 return speed_phy_type_high; 3154 } 3155 3156 /** 3157 * ice_update_phy_type 3158 * @phy_type_low: pointer to the lower part of phy_type 3159 * @phy_type_high: pointer to the higher part of phy_type 3160 * @link_speeds_bitmap: targeted link speeds bitmap 3161 * 3162 * Note: For the link_speeds_bitmap structure, you can check it at 3163 * [ice_aqc_get_link_status->link_speed]. Caller can pass in 3164 * link_speeds_bitmap include multiple speeds. 3165 * 3166 * Each entry in this [phy_type_low, phy_type_high] structure will 3167 * present a certain link speed. This helper function will turn on bits 3168 * in [phy_type_low, phy_type_high] structure based on the value of 3169 * link_speeds_bitmap input parameter. 3170 */ 3171 void 3172 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3173 u16 link_speeds_bitmap) 3174 { 3175 u64 pt_high; 3176 u64 pt_low; 3177 int index; 3178 u16 speed; 3179 3180 /* We first check with low part of phy_type */ 3181 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3182 pt_low = BIT_ULL(index); 3183 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3184 3185 if (link_speeds_bitmap & speed) 3186 *phy_type_low |= BIT_ULL(index); 3187 } 3188 3189 /* We then check with high part of phy_type */ 3190 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3191 pt_high = BIT_ULL(index); 3192 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3193 3194 if (link_speeds_bitmap & speed) 3195 *phy_type_high |= BIT_ULL(index); 3196 } 3197 } 3198 3199 /** 3200 * ice_aq_set_phy_cfg 3201 * @hw: pointer to the HW struct 3202 * @pi: port info structure of the interested logical port 3203 * @cfg: structure with PHY configuration data to be set 3204 * @cd: pointer to command details structure or NULL 3205 * 3206 * Set the various PHY configuration parameters supported on the Port. 3207 * One or more of the Set PHY config parameters may be ignored in an MFP 3208 * mode as the PF may not have the privilege to set some of the PHY Config 3209 * parameters. This status will be indicated by the command response (0x0601). 3210 */ 3211 int 3212 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3213 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3214 { 3215 struct ice_aq_desc desc; 3216 int status; 3217 3218 if (!cfg) 3219 return -EINVAL; 3220 3221 /* Ensure that only valid bits of cfg->caps can be turned on. 
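 * Any unsupported bits are cleared below (after a debug message) rather
 * than failing the request.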
*/ 3222 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3223 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3224 cfg->caps); 3225 3226 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3227 } 3228 3229 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3230 desc.params.set_phy.lport_num = pi->lport; 3231 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3232 3233 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3234 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3235 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3236 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3237 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3238 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3239 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3240 cfg->low_power_ctrl_an); 3241 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3242 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3243 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3244 cfg->link_fec_opt); 3245 3246 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3247 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3248 status = 0; 3249 3250 if (!status) 3251 pi->phy.curr_user_phy_cfg = *cfg; 3252 3253 return status; 3254 } 3255 3256 /** 3257 * ice_update_link_info - update status of the HW network link 3258 * @pi: port info structure of the interested logical port 3259 */ 3260 int ice_update_link_info(struct ice_port_info *pi) 3261 { 3262 struct ice_link_status *li; 3263 int status; 3264 3265 if (!pi) 3266 return -EINVAL; 3267 3268 li = &pi->phy.link_info; 3269 3270 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3271 if (status) 3272 return status; 3273 3274 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3275 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); 3276 3277 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3278 if (!pcaps) 3279 return -ENOMEM; 3280 3281 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3282 pcaps, NULL); 3283 } 3284 3285 return status; 3286 } 3287 3288 /** 3289 * ice_cache_phy_user_req 3290 * @pi: port information structure 3291 * @cache_data: PHY logging data 3292 * @cache_mode: PHY logging mode 3293 * 3294 * Log the user request on (FC, FEC, SPEED) for later use. 
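 *
 * Callers fill the matching member of the cache_data union first; for
 * example, the flow control path in ice_cfg_phy_fc() below does:
 *
 *        cache_data.data.curr_user_fc_req = req_mode;
 *        ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);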
3295 */ 3296 static void 3297 ice_cache_phy_user_req(struct ice_port_info *pi, 3298 struct ice_phy_cache_mode_data cache_data, 3299 enum ice_phy_cache_mode cache_mode) 3300 { 3301 if (!pi) 3302 return; 3303 3304 switch (cache_mode) { 3305 case ICE_FC_MODE: 3306 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3307 break; 3308 case ICE_SPEED_MODE: 3309 pi->phy.curr_user_speed_req = 3310 cache_data.data.curr_user_speed_req; 3311 break; 3312 case ICE_FEC_MODE: 3313 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3314 break; 3315 default: 3316 break; 3317 } 3318 } 3319 3320 /** 3321 * ice_caps_to_fc_mode 3322 * @caps: PHY capabilities 3323 * 3324 * Convert PHY FC capabilities to ice FC mode 3325 */ 3326 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3327 { 3328 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3329 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3330 return ICE_FC_FULL; 3331 3332 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3333 return ICE_FC_TX_PAUSE; 3334 3335 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3336 return ICE_FC_RX_PAUSE; 3337 3338 return ICE_FC_NONE; 3339 } 3340 3341 /** 3342 * ice_caps_to_fec_mode 3343 * @caps: PHY capabilities 3344 * @fec_options: Link FEC options 3345 * 3346 * Convert PHY FEC capabilities to ice FEC mode 3347 */ 3348 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3349 { 3350 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3351 return ICE_FEC_AUTO; 3352 3353 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3354 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3355 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3356 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3357 return ICE_FEC_BASER; 3358 3359 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3360 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3361 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3362 return ICE_FEC_RS; 3363 3364 return ICE_FEC_NONE; 3365 } 3366 3367 /** 3368 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3369 * @pi: port information structure 3370 * @cfg: PHY configuration data to set FC mode 3371 * @req_mode: FC mode to configure 3372 */ 3373 int 3374 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3375 enum ice_fc_mode req_mode) 3376 { 3377 struct ice_phy_cache_mode_data cache_data; 3378 u8 pause_mask = 0x0; 3379 3380 if (!pi || !cfg) 3381 return -EINVAL; 3382 3383 switch (req_mode) { 3384 case ICE_FC_FULL: 3385 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3386 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3387 break; 3388 case ICE_FC_RX_PAUSE: 3389 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3390 break; 3391 case ICE_FC_TX_PAUSE: 3392 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3393 break; 3394 default: 3395 break; 3396 } 3397 3398 /* clear the old pause settings */ 3399 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3400 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3401 3402 /* set the new capabilities */ 3403 cfg->caps |= pause_mask; 3404 3405 /* Cache user FC request */ 3406 cache_data.data.curr_user_fc_req = req_mode; 3407 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3408 3409 return 0; 3410 } 3411 3412 /** 3413 * ice_set_fc 3414 * @pi: port information structure 3415 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3416 * @ena_auto_link_update: enable automatic link update 3417 * 3418 * Set the requested flow control mode. 
3419 */ 3420 int 3421 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3422 { 3423 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); 3424 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3425 struct ice_hw *hw; 3426 int status; 3427 3428 if (!pi || !aq_failures) 3429 return -EINVAL; 3430 3431 *aq_failures = 0; 3432 hw = pi->hw; 3433 3434 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3435 if (!pcaps) 3436 return -ENOMEM; 3437 3438 /* Get the current PHY config */ 3439 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3440 pcaps, NULL); 3441 if (status) { 3442 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3443 goto out; 3444 } 3445 3446 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3447 3448 /* Configure the set PHY data */ 3449 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3450 if (status) 3451 goto out; 3452 3453 /* If the capabilities have changed, then set the new config */ 3454 if (cfg.caps != pcaps->caps) { 3455 int retry_count, retry_max = 10; 3456 3457 /* Auto restart link so settings take effect */ 3458 if (ena_auto_link_update) 3459 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3460 3461 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3462 if (status) { 3463 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3464 goto out; 3465 } 3466 3467 /* Update the link info 3468 * It sometimes takes a really long time for link to 3469 * come back from the atomic reset. Thus, we wait a 3470 * little bit. 3471 */ 3472 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3473 status = ice_update_link_info(pi); 3474 3475 if (!status) 3476 break; 3477 3478 mdelay(100); 3479 } 3480 3481 if (status) 3482 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3483 } 3484 3485 out: 3486 return status; 3487 } 3488 3489 /** 3490 * ice_phy_caps_equals_cfg 3491 * @phy_caps: PHY capabilities 3492 * @phy_cfg: PHY configuration 3493 * 3494 * Helper function to determine if PHY capabilities matches PHY 3495 * configuration 3496 */ 3497 bool 3498 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3499 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3500 { 3501 u8 caps_mask, cfg_mask; 3502 3503 if (!phy_caps || !phy_cfg) 3504 return false; 3505 3506 /* These bits are not common between capabilities and configuration. 3507 * Do not use them to determine equality. 
3508 */ 3509 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3510 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3511 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3512 3513 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3514 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3515 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3516 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3517 phy_caps->eee_cap != phy_cfg->eee_cap || 3518 phy_caps->eeer_value != phy_cfg->eeer_value || 3519 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3520 return false; 3521 3522 return true; 3523 } 3524 3525 /** 3526 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3527 * @pi: port information structure 3528 * @caps: PHY ability structure to copy date from 3529 * @cfg: PHY configuration structure to copy data to 3530 * 3531 * Helper function to copy AQC PHY get ability data to PHY set configuration 3532 * data structure 3533 */ 3534 void 3535 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3536 struct ice_aqc_get_phy_caps_data *caps, 3537 struct ice_aqc_set_phy_cfg_data *cfg) 3538 { 3539 if (!pi || !caps || !cfg) 3540 return; 3541 3542 memset(cfg, 0, sizeof(*cfg)); 3543 cfg->phy_type_low = caps->phy_type_low; 3544 cfg->phy_type_high = caps->phy_type_high; 3545 cfg->caps = caps->caps; 3546 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3547 cfg->eee_cap = caps->eee_cap; 3548 cfg->eeer_value = caps->eeer_value; 3549 cfg->link_fec_opt = caps->link_fec_options; 3550 cfg->module_compliance_enforcement = 3551 caps->module_compliance_enforcement; 3552 } 3553 3554 /** 3555 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3556 * @pi: port information structure 3557 * @cfg: PHY configuration data to set FEC mode 3558 * @fec: FEC mode to configure 3559 */ 3560 int 3561 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3562 enum ice_fec_mode fec) 3563 { 3564 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); 3565 struct ice_hw *hw; 3566 int status; 3567 3568 if (!pi || !cfg) 3569 return -EINVAL; 3570 3571 hw = pi->hw; 3572 3573 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3574 if (!pcaps) 3575 return -ENOMEM; 3576 3577 status = ice_aq_get_phy_caps(pi, false, 3578 (ice_fw_supports_report_dflt_cfg(hw) ? 3579 ICE_AQC_REPORT_DFLT_CFG : 3580 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3581 if (status) 3582 goto out; 3583 3584 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3585 cfg->link_fec_opt = pcaps->link_fec_options; 3586 3587 switch (fec) { 3588 case ICE_FEC_BASER: 3589 /* Clear RS bits, and AND BASE-R ability 3590 * bits and OR request bits. 3591 */ 3592 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3593 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3594 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3595 ICE_AQC_PHY_FEC_25G_KR_REQ; 3596 break; 3597 case ICE_FEC_RS: 3598 /* Clear BASE-R bits, and AND RS ability 3599 * bits and OR request bits. 3600 */ 3601 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3602 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3603 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3604 break; 3605 case ICE_FEC_NONE: 3606 /* Clear all FEC option bits. */ 3607 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3608 break; 3609 case ICE_FEC_AUTO: 3610 /* AND auto FEC bit, and all caps bits. 
*/
3611 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3612 cfg->link_fec_opt |= pcaps->link_fec_options;
3613 break;
3614 default:
3615 status = -EINVAL;
3616 break;
3617 }
3618
3619 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3620 !ice_fw_supports_report_dflt_cfg(hw)) {
3621 struct ice_link_default_override_tlv tlv = { 0 };
3622
3623 status = ice_get_link_default_override(&tlv, pi);
3624 if (status)
3625 goto out;
3626
3627 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3628 (tlv.options & ICE_LINK_OVERRIDE_EN))
3629 cfg->link_fec_opt = tlv.fec_options;
3630 }
3631
3632 out:
3633 return status;
3634 }
3635
3636 /**
3637 * ice_get_link_status - get status of the HW network link
3638 * @pi: port information structure
3639 * @link_up: pointer to bool (true/false = link up/link down)
3640 *
3641 * The variable link_up is set to true if the link is up and false if it
3642 * is down; it is only valid when the returned status is zero. As a
3643 * result of this call, link status reporting becomes enabled.
3644 */
3645 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3646 {
3647 struct ice_phy_info *phy_info;
3648 int status = 0;
3649
3650 if (!pi || !link_up)
3651 return -EINVAL;
3652
3653 phy_info = &pi->phy;
3654
3655 if (phy_info->get_link_info) {
3656 status = ice_update_link_info(pi);
3657
3658 if (status)
3659 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3660 status);
3661 }
3662
3663 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3664
3665 return status;
3666 }
3667
3668 /**
3669 * ice_aq_set_link_restart_an
3670 * @pi: pointer to the port information structure
3671 * @ena_link: if true: enable link, if false: disable link
3672 * @cd: pointer to command details structure or NULL
3673 *
3674 * Sets up the link and restarts Auto-Negotiation over the link.
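 *
 * A minimal usage sketch (illustrative caller, not part of this file):
 * bring the link up and restart AN with no command details:
 *
 *	status = ice_aq_set_link_restart_an(pi, true, NULL);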
3675 */ 3676 int 3677 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3678 struct ice_sq_cd *cd) 3679 { 3680 struct ice_aqc_restart_an *cmd; 3681 struct ice_aq_desc desc; 3682 3683 cmd = &desc.params.restart_an; 3684 3685 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3686 3687 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3688 cmd->lport_num = pi->lport; 3689 if (ena_link) 3690 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3691 else 3692 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3693 3694 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3695 } 3696 3697 /** 3698 * ice_aq_set_event_mask 3699 * @hw: pointer to the HW struct 3700 * @port_num: port number of the physical function 3701 * @mask: event mask to be set 3702 * @cd: pointer to command details structure or NULL 3703 * 3704 * Set event mask (0x0613) 3705 */ 3706 int 3707 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3708 struct ice_sq_cd *cd) 3709 { 3710 struct ice_aqc_set_event_mask *cmd; 3711 struct ice_aq_desc desc; 3712 3713 cmd = &desc.params.set_event_mask; 3714 3715 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3716 3717 cmd->lport_num = port_num; 3718 3719 cmd->event_mask = cpu_to_le16(mask); 3720 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3721 } 3722 3723 /** 3724 * ice_aq_set_mac_loopback 3725 * @hw: pointer to the HW struct 3726 * @ena_lpbk: Enable or Disable loopback 3727 * @cd: pointer to command details structure or NULL 3728 * 3729 * Enable/disable loopback on a given port 3730 */ 3731 int 3732 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3733 { 3734 struct ice_aqc_set_mac_lb *cmd; 3735 struct ice_aq_desc desc; 3736 3737 cmd = &desc.params.set_mac_lb; 3738 3739 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3740 if (ena_lpbk) 3741 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3742 3743 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3744 } 3745 3746 /** 3747 * ice_aq_set_port_id_led 3748 * @pi: pointer to the port information 3749 * @is_orig_mode: is this LED set to original mode (by the net-list) 3750 * @cd: pointer to command details structure or NULL 3751 * 3752 * Set LED value for the given port (0x06e9) 3753 */ 3754 int 3755 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3756 struct ice_sq_cd *cd) 3757 { 3758 struct ice_aqc_set_port_id_led *cmd; 3759 struct ice_hw *hw = pi->hw; 3760 struct ice_aq_desc desc; 3761 3762 cmd = &desc.params.set_port_id_led; 3763 3764 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3765 3766 if (is_orig_mode) 3767 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3768 else 3769 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3770 3771 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3772 } 3773 3774 /** 3775 * ice_aq_get_port_options 3776 * @hw: pointer to the HW struct 3777 * @options: buffer for the resultant port options 3778 * @option_count: input - size of the buffer in port options structures, 3779 * output - number of returned port options 3780 * @lport: logical port to call the command with (optional) 3781 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3782 * when PF owns more than 1 port it must be true 3783 * @active_option_idx: index of active port option in returned buffer 3784 * @active_option_valid: active option in returned buffer is valid 3785 * @pending_option_idx: index of pending port option in returned buffer 3786 * @pending_option_valid: pending option in returned buffer 
is valid 3787 * 3788 * Calls Get Port Options AQC (0x06ea) and verifies result. 3789 */ 3790 int 3791 ice_aq_get_port_options(struct ice_hw *hw, 3792 struct ice_aqc_get_port_options_elem *options, 3793 u8 *option_count, u8 lport, bool lport_valid, 3794 u8 *active_option_idx, bool *active_option_valid, 3795 u8 *pending_option_idx, bool *pending_option_valid) 3796 { 3797 struct ice_aqc_get_port_options *cmd; 3798 struct ice_aq_desc desc; 3799 int status; 3800 u8 i; 3801 3802 /* options buffer shall be able to hold max returned options */ 3803 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3804 return -EINVAL; 3805 3806 cmd = &desc.params.get_port_options; 3807 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3808 3809 if (lport_valid) 3810 cmd->lport_num = lport; 3811 cmd->lport_num_valid = lport_valid; 3812 3813 status = ice_aq_send_cmd(hw, &desc, options, 3814 *option_count * sizeof(*options), NULL); 3815 if (status) 3816 return status; 3817 3818 /* verify direct FW response & set output parameters */ 3819 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 3820 cmd->port_options_count); 3821 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 3822 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 3823 cmd->port_options); 3824 if (*active_option_valid) { 3825 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 3826 cmd->port_options); 3827 if (*active_option_idx > (*option_count - 1)) 3828 return -EIO; 3829 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 3830 *active_option_idx); 3831 } 3832 3833 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 3834 cmd->pending_port_option_status); 3835 if (*pending_option_valid) { 3836 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 3837 cmd->pending_port_option_status); 3838 if (*pending_option_idx > (*option_count - 1)) 3839 return -EIO; 3840 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 3841 *pending_option_idx); 3842 } 3843 3844 /* mask output options fields */ 3845 for (i = 0; i < *option_count; i++) { 3846 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 3847 options[i].pmd); 3848 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 3849 options[i].max_lane_speed); 3850 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 3851 options[i].pmd, options[i].max_lane_speed); 3852 } 3853 3854 return 0; 3855 } 3856 3857 /** 3858 * ice_aq_set_port_option 3859 * @hw: pointer to the HW struct 3860 * @lport: logical port to call the command with 3861 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3862 * when PF owns more than 1 port it must be true 3863 * @new_option: new port option to be written 3864 * 3865 * Calls Set Port Options AQC (0x06eb). 
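 *
 * Usage sketch (the option index 2 is an assumed example value): select a
 * port option on the single port owned by this PF:
 *
 *	status = ice_aq_set_port_option(hw, 0, 0, 2);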
3866 */
3867 int
3868 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
3869 u8 new_option)
3870 {
3871 struct ice_aqc_set_port_option *cmd;
3872 struct ice_aq_desc desc;
3873
3874 if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
3875 return -EINVAL;
3876
3877 cmd = &desc.params.set_port_option;
3878 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
3879
3880 if (lport_valid)
3881 cmd->lport_num = lport;
3882
3883 cmd->lport_num_valid = lport_valid;
3884 cmd->selected_port_option = new_option;
3885
3886 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3887 }
3888
3889 /**
3890 * ice_aq_sff_eeprom
3891 * @hw: pointer to the HW struct
3892 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3893 * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0 = topology default)
3894 * @mem_addr: I2C offset. Lower 8 bits hold the address, upper 8 bits are zero padding.
3895 * @page: QSFP page
3896 * @set_page: set or ignore the page
3897 * @data: pointer to data buffer to be read/written to the I2C device.
3898 * @length: 1-16 for read, 1 for write.
3899 * @write: 0 for read, 1 for write.
3900 * @cd: pointer to command details structure or NULL
3901 *
3902 * Read/Write SFF EEPROM (0x06EE)
3903 */
3904 int
3905 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3906 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3907 bool write, struct ice_sq_cd *cd)
3908 {
3909 struct ice_aqc_sff_eeprom *cmd;
3910 struct ice_aq_desc desc;
3911 u16 i2c_bus_addr;
3912 int status;
3913
3914 if (!data || (mem_addr & 0xff00))
3915 return -EINVAL;
3916
3917 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3918 cmd = &desc.params.read_write_sff_param;
3919 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3920 cmd->lport_num = (u8)(lport & 0xff);
3921 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3922 i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
3923 FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
3924 if (write)
3925 i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
3926 cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
3927 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3928 cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
3929
3930 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3931 return status;
3932 }
3933
3934 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
3935 {
3936 switch (type) {
3937 case ICE_LUT_VSI:
3938 return ICE_LUT_VSI_SIZE;
3939 case ICE_LUT_GLOBAL:
3940 return ICE_LUT_GLOBAL_SIZE;
3941 case ICE_LUT_PF:
3942 return ICE_LUT_PF_SIZE;
3943 }
3944 WARN_ONCE(1, "incorrect type passed");
3945 return ICE_LUT_VSI_SIZE;
3946 }
3947
3948 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
3949 {
3950 switch (size) {
3951 case ICE_LUT_VSI_SIZE:
3952 return ICE_AQC_LUT_SIZE_SMALL;
3953 case ICE_LUT_GLOBAL_SIZE:
3954 return ICE_AQC_LUT_SIZE_512;
3955 case ICE_LUT_PF_SIZE:
3956 return ICE_AQC_LUT_SIZE_2K;
3957 }
3958 WARN_ONCE(1, "incorrect size passed");
3959 return 0;
3960 }
3961
3962 /**
3963 * __ice_aq_get_set_rss_lut
3964 * @hw: pointer to the hardware structure
3965 * @params: RSS LUT parameters
3966 * @set: set true to set the table, false to get the table
3967 *
3968 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3969 */
3970 static int
3971 __ice_aq_get_set_rss_lut(struct ice_hw *hw,
3972 struct ice_aq_get_set_rss_lut_params *params, bool set)
3973 {
3974 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
3975 enum
ice_lut_type lut_type = params->lut_type; 3976 struct ice_aqc_get_set_rss_lut *desc_params; 3977 enum ice_aqc_lut_flags flags; 3978 enum ice_lut_size lut_size; 3979 struct ice_aq_desc desc; 3980 u8 *lut = params->lut; 3981 3982 3983 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 3984 return -EINVAL; 3985 3986 lut_size = ice_lut_type_to_size(lut_type); 3987 if (lut_size > params->lut_size) 3988 return -EINVAL; 3989 else if (set && lut_size != params->lut_size) 3990 return -EINVAL; 3991 3992 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 3993 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 3994 if (set) 3995 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3996 3997 desc_params = &desc.params.get_set_rss_lut; 3998 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3999 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4000 4001 if (lut_type == ICE_LUT_GLOBAL) 4002 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4003 params->global_lut_id); 4004 4005 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4006 desc_params->flags = cpu_to_le16(flags); 4007 4008 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4009 } 4010 4011 /** 4012 * ice_aq_get_rss_lut 4013 * @hw: pointer to the hardware structure 4014 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4015 * 4016 * get the RSS lookup table, PF or VSI type 4017 */ 4018 int 4019 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4020 { 4021 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4022 } 4023 4024 /** 4025 * ice_aq_set_rss_lut 4026 * @hw: pointer to the hardware structure 4027 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4028 * 4029 * set the RSS lookup table, PF or VSI type 4030 */ 4031 int 4032 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4033 { 4034 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4035 } 4036 4037 /** 4038 * __ice_aq_get_set_rss_key 4039 * @hw: pointer to the HW struct 4040 * @vsi_id: VSI FW index 4041 * @key: pointer to key info struct 4042 * @set: set true to set the key, false to get the key 4043 * 4044 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4045 */ 4046 static int 4047 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4048 struct ice_aqc_get_set_rss_keys *key, bool set) 4049 { 4050 struct ice_aqc_get_set_rss_key *desc_params; 4051 u16 key_size = sizeof(*key); 4052 struct ice_aq_desc desc; 4053 4054 if (set) { 4055 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4056 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4057 } else { 4058 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4059 } 4060 4061 desc_params = &desc.params.get_set_rss_key; 4062 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4063 4064 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4065 } 4066 4067 /** 4068 * ice_aq_get_rss_key 4069 * @hw: pointer to the HW struct 4070 * @vsi_handle: software VSI handle 4071 * @key: pointer to key info struct 4072 * 4073 * get the RSS key per VSI 4074 */ 4075 int 4076 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4077 struct ice_aqc_get_set_rss_keys *key) 4078 { 4079 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4080 return -EINVAL; 4081 4082 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4083 key, false); 4084 } 4085 4086 /** 4087 * ice_aq_set_rss_key 4088 * @hw: pointer to the HW struct 4089 * @vsi_handle: software VSI handle 
4090 * @keys: pointer to key info struct 4091 * 4092 * set the RSS key per VSI 4093 */ 4094 int 4095 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4096 struct ice_aqc_get_set_rss_keys *keys) 4097 { 4098 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4099 return -EINVAL; 4100 4101 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4102 keys, true); 4103 } 4104 4105 /** 4106 * ice_aq_add_lan_txq 4107 * @hw: pointer to the hardware structure 4108 * @num_qgrps: Number of added queue groups 4109 * @qg_list: list of queue groups to be added 4110 * @buf_size: size of buffer for indirect command 4111 * @cd: pointer to command details structure or NULL 4112 * 4113 * Add Tx LAN queue (0x0C30) 4114 * 4115 * NOTE: 4116 * Prior to calling add Tx LAN queue: 4117 * Initialize the following as part of the Tx queue context: 4118 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4119 * Cache profile and Packet shaper profile. 4120 * 4121 * After add Tx LAN queue AQ command is completed: 4122 * Interrupts should be associated with specific queues, 4123 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4124 * flow. 4125 */ 4126 static int 4127 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4128 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4129 struct ice_sq_cd *cd) 4130 { 4131 struct ice_aqc_add_tx_qgrp *list; 4132 struct ice_aqc_add_txqs *cmd; 4133 struct ice_aq_desc desc; 4134 u16 i, sum_size = 0; 4135 4136 cmd = &desc.params.add_txqs; 4137 4138 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4139 4140 if (!qg_list) 4141 return -EINVAL; 4142 4143 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4144 return -EINVAL; 4145 4146 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4147 sum_size += struct_size(list, txqs, list->num_txqs); 4148 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4149 list->num_txqs); 4150 } 4151 4152 if (buf_size != sum_size) 4153 return -EINVAL; 4154 4155 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4156 4157 cmd->num_qgrps = num_qgrps; 4158 4159 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4160 } 4161 4162 /** 4163 * ice_aq_dis_lan_txq 4164 * @hw: pointer to the hardware structure 4165 * @num_qgrps: number of groups in the list 4166 * @qg_list: the list of groups to disable 4167 * @buf_size: the total size of the qg_list buffer in bytes 4168 * @rst_src: if called due to reset, specifies the reset source 4169 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4170 * @cd: pointer to command details structure or NULL 4171 * 4172 * Disable LAN Tx queue (0x0C31) 4173 */ 4174 static int 4175 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4176 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4177 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4178 struct ice_sq_cd *cd) 4179 { 4180 struct ice_aqc_dis_txq_item *item; 4181 struct ice_aqc_dis_txqs *cmd; 4182 struct ice_aq_desc desc; 4183 u16 vmvf_and_timeout; 4184 u16 i, sz = 0; 4185 int status; 4186 4187 cmd = &desc.params.dis_txqs; 4188 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4189 4190 /* qg_list can be NULL only in VM/VF reset flow */ 4191 if (!qg_list && !rst_src) 4192 return -EINVAL; 4193 4194 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4195 return -EINVAL; 4196 4197 cmd->num_entries = num_qgrps; 4198 4199 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); 4200 4201 switch (rst_src) { 4202 case ICE_VM_RESET: 4203 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4204 vmvf_and_timeout |= 
vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; 4205 break; 4206 case ICE_VF_RESET: 4207 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4208 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4209 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & 4210 ICE_AQC_Q_DIS_VMVF_NUM_M; 4211 break; 4212 case ICE_NO_RESET: 4213 default: 4214 break; 4215 } 4216 4217 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); 4218 4219 /* flush pipe on time out */ 4220 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4221 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4222 if (!qg_list) 4223 goto do_aq; 4224 4225 /* set RD bit to indicate that command buffer is provided by the driver 4226 * and it needs to be read by the firmware 4227 */ 4228 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4229 4230 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4231 u16 item_size = struct_size(item, q_id, item->num_qs); 4232 4233 /* If the num of queues is even, add 2 bytes of padding */ 4234 if ((item->num_qs % 2) == 0) 4235 item_size += 2; 4236 4237 sz += item_size; 4238 4239 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4240 } 4241 4242 if (buf_size != sz) 4243 return -EINVAL; 4244 4245 do_aq: 4246 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4247 if (status) { 4248 if (!qg_list) 4249 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4250 vmvf_num, hw->adminq.sq_last_status); 4251 else 4252 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4253 le16_to_cpu(qg_list[0].q_id[0]), 4254 hw->adminq.sq_last_status); 4255 } 4256 return status; 4257 } 4258 4259 /** 4260 * ice_aq_cfg_lan_txq 4261 * @hw: pointer to the hardware structure 4262 * @buf: buffer for command 4263 * @buf_size: size of buffer in bytes 4264 * @num_qs: number of queues being configured 4265 * @oldport: origination lport 4266 * @newport: destination lport 4267 * @cd: pointer to command details structure or NULL 4268 * 4269 * Move/Configure LAN Tx queue (0x0C32) 4270 * 4271 * There is a better AQ command to use for moving nodes, so only coding 4272 * this one for configuring the node. 
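 *
 * Hedged sketch (assumes 'buf' was already prepared as an
 * ice_aqc_cfg_txqs_buf describing 'num_qs' queues): reconfigure those
 * queues from lport 0 to lport 1:
 *
 *	status = ice_aq_cfg_lan_txq(hw, buf, buf_size, num_qs, 0, 1, NULL);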
4273 */ 4274 int 4275 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4276 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4277 struct ice_sq_cd *cd) 4278 { 4279 struct ice_aqc_cfg_txqs *cmd; 4280 struct ice_aq_desc desc; 4281 int status; 4282 4283 cmd = &desc.params.cfg_txqs; 4284 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4285 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4286 4287 if (!buf) 4288 return -EINVAL; 4289 4290 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4291 cmd->num_qs = num_qs; 4292 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4293 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); 4294 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); 4295 cmd->blocked_cgds = 0; 4296 4297 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4298 if (status) 4299 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4300 hw->adminq.sq_last_status); 4301 return status; 4302 } 4303 4304 /** 4305 * ice_aq_add_rdma_qsets 4306 * @hw: pointer to the hardware structure 4307 * @num_qset_grps: Number of RDMA Qset groups 4308 * @qset_list: list of Qset groups to be added 4309 * @buf_size: size of buffer for indirect command 4310 * @cd: pointer to command details structure or NULL 4311 * 4312 * Add Tx RDMA Qsets (0x0C33) 4313 */ 4314 static int 4315 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4316 struct ice_aqc_add_rdma_qset_data *qset_list, 4317 u16 buf_size, struct ice_sq_cd *cd) 4318 { 4319 struct ice_aqc_add_rdma_qset_data *list; 4320 struct ice_aqc_add_rdma_qset *cmd; 4321 struct ice_aq_desc desc; 4322 u16 i, sum_size = 0; 4323 4324 cmd = &desc.params.add_rdma_qset; 4325 4326 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4327 4328 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4329 return -EINVAL; 4330 4331 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4332 u16 num_qsets = le16_to_cpu(list->num_qsets); 4333 4334 sum_size += struct_size(list, rdma_qsets, num_qsets); 4335 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4336 num_qsets); 4337 } 4338 4339 if (buf_size != sum_size) 4340 return -EINVAL; 4341 4342 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4343 4344 cmd->num_qset_grps = num_qset_grps; 4345 4346 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4347 } 4348 4349 /* End of FW Admin Queue command wrappers */ 4350 4351 /** 4352 * ice_pack_ctx_byte - write a byte to a packed context structure 4353 * @src_ctx: unpacked source context structure 4354 * @dest_ctx: packed destination context data 4355 * @ce_info: context element description 4356 */ 4357 static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, 4358 const struct ice_ctx_ele *ce_info) 4359 { 4360 u8 src_byte, dest_byte, mask; 4361 u8 *from, *dest; 4362 u16 shift_width; 4363 4364 /* copy from the next struct field */ 4365 from = src_ctx + ce_info->offset; 4366 4367 /* prepare the bits and mask */ 4368 shift_width = ce_info->lsb % 8; 4369 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4370 4371 src_byte = *from; 4372 src_byte <<= shift_width; 4373 src_byte &= mask; 4374 4375 /* get the current bits from the target bit string */ 4376 dest = dest_ctx + (ce_info->lsb / 8); 4377 4378 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4379 4380 dest_byte &= ~mask; /* get the bits not changing */ 4381 dest_byte |= src_byte; /* add in the new bits */ 4382 4383 /* put it all back */ 4384 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4385 } 4386 4387 /** 4388 * ice_pack_ctx_word - write 
a word to a packed context structure 4389 * @src_ctx: unpacked source context structure 4390 * @dest_ctx: packed destination context data 4391 * @ce_info: context element description 4392 */ 4393 static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, 4394 const struct ice_ctx_ele *ce_info) 4395 { 4396 u16 src_word, mask; 4397 __le16 dest_word; 4398 u8 *from, *dest; 4399 u16 shift_width; 4400 4401 /* copy from the next struct field */ 4402 from = src_ctx + ce_info->offset; 4403 4404 /* prepare the bits and mask */ 4405 shift_width = ce_info->lsb % 8; 4406 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4407 4408 /* don't swizzle the bits until after the mask because the mask bits 4409 * will be in a different bit position on big endian machines 4410 */ 4411 src_word = *(u16 *)from; 4412 src_word <<= shift_width; 4413 src_word &= mask; 4414 4415 /* get the current bits from the target bit string */ 4416 dest = dest_ctx + (ce_info->lsb / 8); 4417 4418 memcpy(&dest_word, dest, sizeof(dest_word)); 4419 4420 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4421 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4422 4423 /* put it all back */ 4424 memcpy(dest, &dest_word, sizeof(dest_word)); 4425 } 4426 4427 /** 4428 * ice_pack_ctx_dword - write a dword to a packed context structure 4429 * @src_ctx: unpacked source context structure 4430 * @dest_ctx: packed destination context data 4431 * @ce_info: context element description 4432 */ 4433 static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, 4434 const struct ice_ctx_ele *ce_info) 4435 { 4436 u32 src_dword, mask; 4437 __le32 dest_dword; 4438 u8 *from, *dest; 4439 u16 shift_width; 4440 4441 /* copy from the next struct field */ 4442 from = src_ctx + ce_info->offset; 4443 4444 /* prepare the bits and mask */ 4445 shift_width = ce_info->lsb % 8; 4446 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); 4447 4448 /* don't swizzle the bits until after the mask because the mask bits 4449 * will be in a different bit position on big endian machines 4450 */ 4451 src_dword = *(u32 *)from; 4452 src_dword <<= shift_width; 4453 src_dword &= mask; 4454 4455 /* get the current bits from the target bit string */ 4456 dest = dest_ctx + (ce_info->lsb / 8); 4457 4458 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4459 4460 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4461 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4462 4463 /* put it all back */ 4464 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4465 } 4466 4467 /** 4468 * ice_pack_ctx_qword - write a qword to a packed context structure 4469 * @src_ctx: unpacked source context structure 4470 * @dest_ctx: packed destination context data 4471 * @ce_info: context element description 4472 */ 4473 static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, 4474 const struct ice_ctx_ele *ce_info) 4475 { 4476 u64 src_qword, mask; 4477 __le64 dest_qword; 4478 u8 *from, *dest; 4479 u16 shift_width; 4480 4481 /* copy from the next struct field */ 4482 from = src_ctx + ce_info->offset; 4483 4484 /* prepare the bits and mask */ 4485 shift_width = ce_info->lsb % 8; 4486 mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); 4487 4488 /* don't swizzle the bits until after the mask because the mask bits 4489 * will be in a different bit position on big endian machines 4490 */ 4491 src_qword = *(u64 *)from; 4492 src_qword <<= shift_width; 4493 src_qword &= mask; 4494 4495 /* get the current bits from the target bit 
string */ 4496 dest = dest_ctx + (ce_info->lsb / 8); 4497 4498 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4499 4500 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4501 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4502 4503 /* put it all back */ 4504 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4505 } 4506 4507 /** 4508 * ice_set_ctx - set context bits in packed structure 4509 * @hw: pointer to the hardware structure 4510 * @src_ctx: pointer to a generic non-packed context structure 4511 * @dest_ctx: pointer to memory for the packed structure 4512 * @ce_info: List of Rx context elements 4513 */ 4514 int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4515 const struct ice_ctx_ele *ce_info) 4516 { 4517 int f; 4518 4519 for (f = 0; ce_info[f].width; f++) { 4520 /* We have to deal with each element of the FW response 4521 * using the correct size so that we are correct regardless 4522 * of the endianness of the machine. 4523 */ 4524 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4525 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", 4526 f, ce_info[f].width, ce_info[f].size_of); 4527 continue; 4528 } 4529 switch (ce_info[f].size_of) { 4530 case sizeof(u8): 4531 ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); 4532 break; 4533 case sizeof(u16): 4534 ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); 4535 break; 4536 case sizeof(u32): 4537 ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); 4538 break; 4539 case sizeof(u64): 4540 ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); 4541 break; 4542 default: 4543 return -EINVAL; 4544 } 4545 } 4546 4547 return 0; 4548 } 4549 4550 /** 4551 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4552 * @hw: pointer to the HW struct 4553 * @vsi_handle: software VSI handle 4554 * @tc: TC number 4555 * @q_handle: software queue handle 4556 */ 4557 struct ice_q_ctx * 4558 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4559 { 4560 struct ice_vsi_ctx *vsi; 4561 struct ice_q_ctx *q_ctx; 4562 4563 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4564 if (!vsi) 4565 return NULL; 4566 if (q_handle >= vsi->num_lan_q_entries[tc]) 4567 return NULL; 4568 if (!vsi->lan_q_ctx[tc]) 4569 return NULL; 4570 q_ctx = vsi->lan_q_ctx[tc]; 4571 return &q_ctx[q_handle]; 4572 } 4573 4574 /** 4575 * ice_ena_vsi_txq 4576 * @pi: port information structure 4577 * @vsi_handle: software VSI handle 4578 * @tc: TC number 4579 * @q_handle: software queue handle 4580 * @num_qgrps: Number of added queue groups 4581 * @buf: list of queue groups to be added 4582 * @buf_size: size of buffer for indirect command 4583 * @cd: pointer to command details structure or NULL 4584 * 4585 * This function adds one LAN queue 4586 */ 4587 int 4588 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4589 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4590 struct ice_sq_cd *cd) 4591 { 4592 struct ice_aqc_txsched_elem_data node = { 0 }; 4593 struct ice_sched_node *parent; 4594 struct ice_q_ctx *q_ctx; 4595 struct ice_hw *hw; 4596 int status; 4597 4598 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4599 return -EIO; 4600 4601 if (num_qgrps > 1 || buf->num_txqs > 1) 4602 return -ENOSPC; 4603 4604 hw = pi->hw; 4605 4606 if (!ice_is_vsi_valid(hw, vsi_handle)) 4607 return -EINVAL; 4608 4609 mutex_lock(&pi->sched_lock); 4610 4611 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4612 if 
(!q_ctx) {
4613 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4614 q_handle);
4615 status = -EINVAL;
4616 goto ena_txq_exit;
4617 }
4618
4619 /* find a parent node */
4620 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4621 ICE_SCHED_NODE_OWNER_LAN);
4622 if (!parent) {
4623 status = -EINVAL;
4624 goto ena_txq_exit;
4625 }
4626
4627 buf->parent_teid = parent->info.node_teid;
4628 node.parent_teid = parent->info.node_teid;
4629 /* Mark the values in the "generic" section as valid. The default
4630 * value in the "generic" section is zero. This means that:
4631 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4632 * - Priority 0 among siblings, indicated by Bits 1-3.
4633 * - WFQ, indicated by Bit 4.
4634 * - Adjustment value 0 is used in the PSM credit update flow, indicated
4635 * by Bits 5-6.
4636 * - Bit 7 is reserved.
4637 * Without setting the generic section as valid in valid_sections, the
4638 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4639 */
4640 buf->txqs[0].info.valid_sections =
4641 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4642 ICE_AQC_ELEM_VALID_EIR;
4643 buf->txqs[0].info.generic = 0;
4644 buf->txqs[0].info.cir_bw.bw_profile_idx =
4645 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4646 buf->txqs[0].info.cir_bw.bw_alloc =
4647 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4648 buf->txqs[0].info.eir_bw.bw_profile_idx =
4649 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4650 buf->txqs[0].info.eir_bw.bw_alloc =
4651 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4652
4653 /* add the LAN queue */
4654 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4655 if (status) {
4656 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4657 le16_to_cpu(buf->txqs[0].txq_id),
4658 hw->adminq.sq_last_status);
4659 goto ena_txq_exit;
4660 }
4661
4662 node.node_teid = buf->txqs[0].q_teid;
4663 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4664 q_ctx->q_handle = q_handle;
4665 q_ctx->q_teid = le32_to_cpu(node.node_teid);
4666
4667 /* add a leaf node into scheduler tree queue layer */
4668 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
4669 if (!status)
4670 status = ice_sched_replay_q_bw(pi, q_ctx);
4671
4672 ena_txq_exit:
4673 mutex_unlock(&pi->sched_lock);
4674 return status;
4675 }
4676
4677 /**
4678 * ice_dis_vsi_txq
4679 * @pi: port information structure
4680 * @vsi_handle: software VSI handle
4681 * @tc: TC number
4682 * @num_queues: number of queues
4683 * @q_handles: pointer to software queue handle array
4684 * @q_ids: pointer to the q_id array
4685 * @q_teids: pointer to queue node teids
4686 * @rst_src: if called due to reset, specifies the reset source
4687 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4688 * @cd: pointer to command details structure or NULL
4689 *
4690 * This function removes queues and their corresponding nodes in SW DB
4691 */
4692 int
4693 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4694 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4695 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4696 struct ice_sq_cd *cd)
4697 {
4698 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
4699 u16 i, buf_size = __struct_size(qg_list);
4700 struct ice_q_ctx *q_ctx;
4701 int status = -ENOENT;
4702 struct ice_hw *hw;
4703
4704 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4705 return -EIO;
4706
4707 hw = pi->hw;
4708
4709 if (!num_queues) {
4710 /* if the queue is already disabled but the disable queue command
4711 still has
to be sent to complete the VF reset, then call
4712 * ice_aq_dis_lan_txq without any queue information
4713 */
4714 if (rst_src)
4715 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4716 vmvf_num, NULL);
4717 return -EIO;
4718 }
4719
4720 mutex_lock(&pi->sched_lock);
4721
4722 for (i = 0; i < num_queues; i++) {
4723 struct ice_sched_node *node;
4724
4725 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4726 if (!node)
4727 continue;
4728 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4729 if (!q_ctx) {
4730 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4731 q_handles[i]);
4732 continue;
4733 }
4734 if (q_ctx->q_handle != q_handles[i]) {
4735 ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
4736 q_ctx->q_handle, q_handles[i]);
4737 continue;
4738 }
4739 qg_list->parent_teid = node->info.parent_teid;
4740 qg_list->num_qs = 1;
4741 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4742 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4743 vmvf_num, cd);
4744
4745 if (status)
4746 break;
4747 ice_free_sched_node(pi, node);
4748 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4749 q_ctx->q_teid = ICE_INVAL_TEID;
4750 }
4751 mutex_unlock(&pi->sched_lock);
4752 return status;
4753 }
4754
4755 /**
4756 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4757 * @pi: port information structure
4758 * @vsi_handle: software VSI handle
4759 * @tc_bitmap: TC bitmap
4760 * @maxqs: max queues array per TC
4761 * @owner: LAN or RDMA
4762 *
4763 * This function adds/updates the VSI queues per TC.
4764 */
4765 static int
4766 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4767 u16 *maxqs, u8 owner)
4768 {
4769 int status = 0;
4770 u8 i;
4771
4772 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4773 return -EIO;
4774
4775 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4776 return -EINVAL;
4777
4778 mutex_lock(&pi->sched_lock);
4779
4780 ice_for_each_traffic_class(i) {
4781 /* configuration is possible only if TC node is present */
4782 if (!ice_sched_get_tc_node(pi, i))
4783 continue;
4784
4785 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4786 ice_is_tc_ena(tc_bitmap, i));
4787 if (status)
4788 break;
4789 }
4790
4791 mutex_unlock(&pi->sched_lock);
4792 return status;
4793 }
4794
4795 /**
4796 * ice_cfg_vsi_lan - configure VSI LAN queues
4797 * @pi: port information structure
4798 * @vsi_handle: software VSI handle
4799 * @tc_bitmap: TC bitmap
4800 * @max_lanqs: max LAN queues array per TC
4801 *
4802 * This function adds/updates the VSI LAN queues per TC.
4803 */
4804 int
4805 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4806 u16 *max_lanqs)
4807 {
4808 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4809 ICE_SCHED_NODE_OWNER_LAN);
4810 }
4811
4812 /**
4813 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4814 * @pi: port information structure
4815 * @vsi_handle: software VSI handle
4816 * @tc_bitmap: TC bitmap
4817 * @max_rdmaqs: max RDMA queues array per TC
4818 *
4819 * This function adds/updates the VSI RDMA queues per TC.
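 *
 * Illustrative sketch (values are assumptions): allow one RDMA queue set
 * on TC 0 only:
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 1 };
 *
 *	status = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);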
4820 */ 4821 int 4822 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4823 u16 *max_rdmaqs) 4824 { 4825 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4826 ICE_SCHED_NODE_OWNER_RDMA); 4827 } 4828 4829 /** 4830 * ice_ena_vsi_rdma_qset 4831 * @pi: port information structure 4832 * @vsi_handle: software VSI handle 4833 * @tc: TC number 4834 * @rdma_qset: pointer to RDMA Qset 4835 * @num_qsets: number of RDMA Qsets 4836 * @qset_teid: pointer to Qset node TEIDs 4837 * 4838 * This function adds RDMA Qset 4839 */ 4840 int 4841 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4842 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4843 { 4844 struct ice_aqc_txsched_elem_data node = { 0 }; 4845 struct ice_aqc_add_rdma_qset_data *buf; 4846 struct ice_sched_node *parent; 4847 struct ice_hw *hw; 4848 u16 i, buf_size; 4849 int ret; 4850 4851 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4852 return -EIO; 4853 hw = pi->hw; 4854 4855 if (!ice_is_vsi_valid(hw, vsi_handle)) 4856 return -EINVAL; 4857 4858 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4859 buf = kzalloc(buf_size, GFP_KERNEL); 4860 if (!buf) 4861 return -ENOMEM; 4862 mutex_lock(&pi->sched_lock); 4863 4864 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4865 ICE_SCHED_NODE_OWNER_RDMA); 4866 if (!parent) { 4867 ret = -EINVAL; 4868 goto rdma_error_exit; 4869 } 4870 buf->parent_teid = parent->info.node_teid; 4871 node.parent_teid = parent->info.node_teid; 4872 4873 buf->num_qsets = cpu_to_le16(num_qsets); 4874 for (i = 0; i < num_qsets; i++) { 4875 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4876 buf->rdma_qsets[i].info.valid_sections = 4877 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4878 ICE_AQC_ELEM_VALID_EIR; 4879 buf->rdma_qsets[i].info.generic = 0; 4880 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4881 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4882 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4883 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4884 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4885 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4886 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4887 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4888 } 4889 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4890 if (ret) { 4891 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4892 goto rdma_error_exit; 4893 } 4894 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4895 for (i = 0; i < num_qsets; i++) { 4896 node.node_teid = buf->rdma_qsets[i].qset_teid; 4897 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4898 &node, NULL); 4899 if (ret) 4900 break; 4901 qset_teid[i] = le32_to_cpu(node.node_teid); 4902 } 4903 rdma_error_exit: 4904 mutex_unlock(&pi->sched_lock); 4905 kfree(buf); 4906 return ret; 4907 } 4908 4909 /** 4910 * ice_dis_vsi_rdma_qset - free RDMA resources 4911 * @pi: port_info struct 4912 * @count: number of RDMA Qsets to free 4913 * @qset_teid: TEID of Qset node 4914 * @q_id: list of queue IDs being disabled 4915 */ 4916 int 4917 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4918 u16 *q_id) 4919 { 4920 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4921 u16 qg_size = __struct_size(qg_list); 4922 struct ice_hw *hw; 4923 int status = 0; 4924 int i; 4925 4926 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4927 return -EIO; 4928 4929 hw = pi->hw; 4930 4931 mutex_lock(&pi->sched_lock); 4932 4933 for (i = 0; i < count; i++) { 4934 struct ice_sched_node *node; 4935 4936 node = 
ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4937 if (!node) 4938 continue; 4939 4940 qg_list->parent_teid = node->info.parent_teid; 4941 qg_list->num_qs = 1; 4942 qg_list->q_id[0] = 4943 cpu_to_le16(q_id[i] | 4944 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4945 4946 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4947 ICE_NO_RESET, 0, NULL); 4948 if (status) 4949 break; 4950 4951 ice_free_sched_node(pi, node); 4952 } 4953 4954 mutex_unlock(&pi->sched_lock); 4955 return status; 4956 } 4957 4958 /** 4959 * ice_aq_get_cgu_abilities - get cgu abilities 4960 * @hw: pointer to the HW struct 4961 * @abilities: CGU abilities 4962 * 4963 * Get CGU abilities (0x0C61) 4964 * Return: 0 on success or negative value on failure. 4965 */ 4966 int 4967 ice_aq_get_cgu_abilities(struct ice_hw *hw, 4968 struct ice_aqc_get_cgu_abilities *abilities) 4969 { 4970 struct ice_aq_desc desc; 4971 4972 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 4973 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 4974 } 4975 4976 /** 4977 * ice_aq_set_input_pin_cfg - set input pin config 4978 * @hw: pointer to the HW struct 4979 * @input_idx: Input index 4980 * @flags1: Input flags 4981 * @flags2: Input flags 4982 * @freq: Frequency in Hz 4983 * @phase_delay: Delay in ps 4984 * 4985 * Set CGU input config (0x0C62) 4986 * Return: 0 on success or negative value on failure. 4987 */ 4988 int 4989 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 4990 u32 freq, s32 phase_delay) 4991 { 4992 struct ice_aqc_set_cgu_input_config *cmd; 4993 struct ice_aq_desc desc; 4994 4995 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 4996 cmd = &desc.params.set_cgu_input_config; 4997 cmd->input_idx = input_idx; 4998 cmd->flags1 = flags1; 4999 cmd->flags2 = flags2; 5000 cmd->freq = cpu_to_le32(freq); 5001 cmd->phase_delay = cpu_to_le32(phase_delay); 5002 5003 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5004 } 5005 5006 /** 5007 * ice_aq_get_input_pin_cfg - get input pin config 5008 * @hw: pointer to the HW struct 5009 * @input_idx: Input index 5010 * @status: Pin status 5011 * @type: Pin type 5012 * @flags1: Input flags 5013 * @flags2: Input flags 5014 * @freq: Frequency in Hz 5015 * @phase_delay: Delay in ps 5016 * 5017 * Get CGU input config (0x0C63) 5018 * Return: 0 on success or negative value on failure. 
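 *
 * Any output argument may be passed as NULL when the caller does not need
 * that field; the implementation below only writes through non-NULL
 * pointers. Sketch (illustrative): read only the frequency of input pin 0:
 *
 *	u32 freq;
 *
 *	ret = ice_aq_get_input_pin_cfg(hw, 0, NULL, NULL, NULL, NULL,
 *				       &freq, NULL);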
5019 */ 5020 int 5021 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5022 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5023 { 5024 struct ice_aqc_get_cgu_input_config *cmd; 5025 struct ice_aq_desc desc; 5026 int ret; 5027 5028 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5029 cmd = &desc.params.get_cgu_input_config; 5030 cmd->input_idx = input_idx; 5031 5032 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5033 if (!ret) { 5034 if (status) 5035 *status = cmd->status; 5036 if (type) 5037 *type = cmd->type; 5038 if (flags1) 5039 *flags1 = cmd->flags1; 5040 if (flags2) 5041 *flags2 = cmd->flags2; 5042 if (freq) 5043 *freq = le32_to_cpu(cmd->freq); 5044 if (phase_delay) 5045 *phase_delay = le32_to_cpu(cmd->phase_delay); 5046 } 5047 5048 return ret; 5049 } 5050 5051 /** 5052 * ice_aq_set_output_pin_cfg - set output pin config 5053 * @hw: pointer to the HW struct 5054 * @output_idx: Output index 5055 * @flags: Output flags 5056 * @src_sel: Index of DPLL block 5057 * @freq: Output frequency 5058 * @phase_delay: Output phase compensation 5059 * 5060 * Set CGU output config (0x0C64) 5061 * Return: 0 on success or negative value on failure. 5062 */ 5063 int 5064 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5065 u8 src_sel, u32 freq, s32 phase_delay) 5066 { 5067 struct ice_aqc_set_cgu_output_config *cmd; 5068 struct ice_aq_desc desc; 5069 5070 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5071 cmd = &desc.params.set_cgu_output_config; 5072 cmd->output_idx = output_idx; 5073 cmd->flags = flags; 5074 cmd->src_sel = src_sel; 5075 cmd->freq = cpu_to_le32(freq); 5076 cmd->phase_delay = cpu_to_le32(phase_delay); 5077 5078 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5079 } 5080 5081 /** 5082 * ice_aq_get_output_pin_cfg - get output pin config 5083 * @hw: pointer to the HW struct 5084 * @output_idx: Output index 5085 * @flags: Output flags 5086 * @src_sel: Internal DPLL source 5087 * @freq: Output frequency 5088 * @src_freq: Source frequency 5089 * 5090 * Get CGU output config (0x0C65) 5091 * Return: 0 on success or negative value on failure. 5092 */ 5093 int 5094 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5095 u8 *src_sel, u32 *freq, u32 *src_freq) 5096 { 5097 struct ice_aqc_get_cgu_output_config *cmd; 5098 struct ice_aq_desc desc; 5099 int ret; 5100 5101 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5102 cmd = &desc.params.get_cgu_output_config; 5103 cmd->output_idx = output_idx; 5104 5105 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5106 if (!ret) { 5107 if (flags) 5108 *flags = cmd->flags; 5109 if (src_sel) 5110 *src_sel = cmd->src_sel; 5111 if (freq) 5112 *freq = le32_to_cpu(cmd->freq); 5113 if (src_freq) 5114 *src_freq = le32_to_cpu(cmd->src_freq); 5115 } 5116 5117 return ret; 5118 } 5119 5120 /** 5121 * ice_aq_get_cgu_dpll_status - get dpll status 5122 * @hw: pointer to the HW struct 5123 * @dpll_num: DPLL index 5124 * @ref_state: Reference clock state 5125 * @config: current DPLL config 5126 * @dpll_state: current DPLL state 5127 * @phase_offset: Phase offset in ns 5128 * @eec_mode: EEC_mode 5129 * 5130 * Get CGU DPLL status (0x0C66) 5131 * Return: 0 on success or negative value on failure. 
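 *
 * Note on @phase_offset: as the implementation below shows, firmware
 * reports the offset as a 48-bit signed value split across two 32-bit
 * words, so the halves are recombined and then sign-extended from bit 47.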
5132 */ 5133 int 5134 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5135 u8 *dpll_state, u8 *config, s64 *phase_offset, 5136 u8 *eec_mode) 5137 { 5138 struct ice_aqc_get_cgu_dpll_status *cmd; 5139 struct ice_aq_desc desc; 5140 int status; 5141 5142 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5143 cmd = &desc.params.get_cgu_dpll_status; 5144 cmd->dpll_num = dpll_num; 5145 5146 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5147 if (!status) { 5148 *ref_state = cmd->ref_state; 5149 *dpll_state = cmd->dpll_state; 5150 *config = cmd->config; 5151 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5152 *phase_offset <<= 32; 5153 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5154 *phase_offset = sign_extend64(*phase_offset, 47); 5155 *eec_mode = cmd->eec_mode; 5156 } 5157 5158 return status; 5159 } 5160 5161 /** 5162 * ice_aq_set_cgu_dpll_config - set dpll config 5163 * @hw: pointer to the HW struct 5164 * @dpll_num: DPLL index 5165 * @ref_state: Reference clock state 5166 * @config: DPLL config 5167 * @eec_mode: EEC mode 5168 * 5169 * Set CGU DPLL config (0x0C67) 5170 * Return: 0 on success or negative value on failure. 5171 */ 5172 int 5173 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5174 u8 config, u8 eec_mode) 5175 { 5176 struct ice_aqc_set_cgu_dpll_config *cmd; 5177 struct ice_aq_desc desc; 5178 5179 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5180 cmd = &desc.params.set_cgu_dpll_config; 5181 cmd->dpll_num = dpll_num; 5182 cmd->ref_state = ref_state; 5183 cmd->config = config; 5184 cmd->eec_mode = eec_mode; 5185 5186 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5187 } 5188 5189 /** 5190 * ice_aq_set_cgu_ref_prio - set input reference priority 5191 * @hw: pointer to the HW struct 5192 * @dpll_num: DPLL index 5193 * @ref_idx: Reference pin index 5194 * @ref_priority: Reference input priority 5195 * 5196 * Set CGU reference priority (0x0C68) 5197 * Return: 0 on success or negative value on failure. 5198 */ 5199 int 5200 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5201 u8 ref_priority) 5202 { 5203 struct ice_aqc_set_cgu_ref_prio *cmd; 5204 struct ice_aq_desc desc; 5205 5206 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5207 cmd = &desc.params.set_cgu_ref_prio; 5208 cmd->dpll_num = dpll_num; 5209 cmd->ref_idx = ref_idx; 5210 cmd->ref_priority = ref_priority; 5211 5212 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5213 } 5214 5215 /** 5216 * ice_aq_get_cgu_ref_prio - get input reference priority 5217 * @hw: pointer to the HW struct 5218 * @dpll_num: DPLL index 5219 * @ref_idx: Reference pin index 5220 * @ref_prio: Reference input priority 5221 * 5222 * Get CGU reference priority (0x0C69) 5223 * Return: 0 on success or negative value on failure. 
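 *
 * Usage sketch (the pin and DPLL indices are assumed example values):
 *
 *	u8 prio;
 *
 *	status = ice_aq_get_cgu_ref_prio(hw, 0, 1, &prio);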
5224 */
5225 int
5226 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
5227 u8 *ref_prio)
5228 {
5229 struct ice_aqc_get_cgu_ref_prio *cmd;
5230 struct ice_aq_desc desc;
5231 int status;
5232
5233 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
5234 cmd = &desc.params.get_cgu_ref_prio;
5235 cmd->dpll_num = dpll_num;
5236 cmd->ref_idx = ref_idx;
5237
5238 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5239 if (!status)
5240 *ref_prio = cmd->ref_priority;
5241
5242 return status;
5243 }
5244
5245 /**
5246 * ice_aq_get_cgu_info - get CGU info
5247 * @hw: pointer to the HW struct
5248 * @cgu_id: CGU ID
5249 * @cgu_cfg_ver: CGU config version
5250 * @cgu_fw_ver: CGU firmware version
5251 *
5252 * Get CGU info (0x0C6A)
5253 * Return: 0 on success or negative value on failure.
5254 */
5255 int
5256 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
5257 u32 *cgu_fw_ver)
5258 {
5259 struct ice_aqc_get_cgu_info *cmd;
5260 struct ice_aq_desc desc;
5261 int status;
5262
5263 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
5264 cmd = &desc.params.get_cgu_info;
5265
5266 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5267 if (!status) {
5268 *cgu_id = le32_to_cpu(cmd->cgu_id);
5269 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
5270 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
5271 }
5272
5273 return status;
5274 }
5275
5276 /**
5277 * ice_aq_set_phy_rec_clk_out - set RCLK PHY out
5278 * @hw: pointer to the HW struct
5279 * @phy_output: PHY reference clock output pin
5280 * @enable: GPIO state to be applied
5281 * @freq: PHY output frequency
5282 *
5283 * Set PHY recovered clock as reference (0x0630)
5284 * Return: 0 on success or negative value on failure.
5285 */
5286 int
5287 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
5288 u32 *freq)
5289 {
5290 struct ice_aqc_set_phy_rec_clk_out *cmd;
5291 struct ice_aq_desc desc;
5292 int status;
5293
5294 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
5295 cmd = &desc.params.set_phy_rec_clk_out;
5296 cmd->phy_output = phy_output;
5297 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
5298 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
5299 cmd->freq = cpu_to_le32(*freq);
5300
5301 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5302 if (!status)
5303 *freq = le32_to_cpu(cmd->freq);
5304
5305 return status;
5306 }
5307
5308 /**
5309 * ice_aq_get_phy_rec_clk_out - get PHY recovered signal info
5310 * @hw: pointer to the HW struct
5311 * @phy_output: PHY reference clock output pin
5312 * @port_num: Port number
5313 * @flags: PHY flags
5314 * @node_handle: PHY node handle
5315 *
5316 * Get PHY recovered clock output info (0x0631)
5317 * Return: 0 on success or negative value on failure.
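 *
 * Note that @phy_output is used for both input and output: the caller
 * passes the pin to query and firmware's answer is written back through
 * the same pointer. Sketch (illustrative caller):
 *
 *	u8 pin = 0, port, flags;
 *	u16 node;
 *
 *	status = ice_aq_get_phy_rec_clk_out(hw, &pin, &port, &flags, &node);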
5318 */
5319 int
5320 ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
5321 u8 *flags, u16 *node_handle)
5322 {
5323 struct ice_aqc_get_phy_rec_clk_out *cmd;
5324 struct ice_aq_desc desc;
5325 int status;
5326
5327 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
5328 cmd = &desc.params.get_phy_rec_clk_out;
5329 cmd->phy_output = *phy_output;
5330
5331 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5332 if (!status) {
5333 *phy_output = cmd->phy_output;
5334 if (port_num)
5335 *port_num = cmd->port_num;
5336 if (flags)
5337 *flags = cmd->flags;
5338 if (node_handle)
5339 *node_handle = le16_to_cpu(cmd->node_handle);
5340 }
5341
5342 return status;
5343 }
5344
5345 /**
5346 * ice_aq_get_sensor_reading
5347 * @hw: pointer to the HW struct
5348 * @data: pointer to data to be read from the sensor
5349 *
5350 * Get sensor reading (0x0632)
5351 */
5352 int ice_aq_get_sensor_reading(struct ice_hw *hw,
5353 struct ice_aqc_get_sensor_reading_resp *data)
5354 {
5355 struct ice_aqc_get_sensor_reading *cmd;
5356 struct ice_aq_desc desc;
5357 int status;
5358
5359 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
5360 cmd = &desc.params.get_sensor_reading;
5361 #define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
5362 #define ICE_INTERNAL_TEMP_SENSOR 0
5363 cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
5364 cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
5365
5366 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5367 if (!status)
5368 memcpy(data, &desc.params.get_sensor_reading_resp,
5369 sizeof(*data));
5370
5371 return status;
5372 }
5373
5374 /**
5375 * ice_replay_pre_init - replay pre initialization
5376 * @hw: pointer to the HW struct
5377 *
5378 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5379 */
5380 static int ice_replay_pre_init(struct ice_hw *hw)
5381 {
5382 struct ice_switch_info *sw = hw->switch_info;
5383 u8 i;
5384
5385 /* Delete old entries from replay filter list head if there are any */
5386 ice_rm_all_sw_replay_rule_info(hw);
5387 /* At the start of replay, move entries into the replay_rules list;
5388 * this allows adding rule entries back to the filt_rules list, which
5389 * is the operational list.
5390 */
5391 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5392 list_replace_init(&sw->recp_list[i].filt_rules,
5393 &sw->recp_list[i].filt_replay_rules);
5394 ice_sched_replay_agg_vsi_preinit(hw);
5395
5396 return 0;
5397 }
5398
5399 /**
5400 * ice_replay_vsi - replay VSI configuration
5401 * @hw: pointer to the HW struct
5402 * @vsi_handle: driver VSI handle
5403 *
5404 * Restore all VSI configuration after reset. It is required to call this
5405 * function with the main VSI first.
5406 */
5407 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5408 {
5409 int status;
5410
5411 if (!ice_is_vsi_valid(hw, vsi_handle))
5412 return -EINVAL;
5413
5414 /* Replay pre-initialization if there is any */
5415 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
5416 status = ice_replay_pre_init(hw);
5417 if (status)
5418 return status;
5419 }
5420 /* Replay per VSI all RSS configurations */
5421 status = ice_replay_rss_cfg(hw, vsi_handle);
5422 if (status)
5423 return status;
5424 /* Replay per VSI all filters */
5425 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
5426 if (!status)
5427 status = ice_replay_vsi_agg(hw, vsi_handle);
5428 return status;
5429 }
5430
5431 /**
5432 * ice_replay_post - post replay configuration cleanup
5433 * @hw: pointer to the HW struct
5434 *
5435 * Post replay cleanup.
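 *
 * The reset-recovery order implied by the functions above (illustrative
 * only, with 'handle' standing for each remaining driver VSI handle):
 *
 *	ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	ice_replay_vsi(hw, handle);
 *	ice_replay_post(hw);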
5436 */
5437 void ice_replay_post(struct ice_hw *hw)
5438 {
5439 /* Delete old entries from replay filter list head */
5440 ice_rm_all_sw_replay_rule_info(hw);
5441 ice_sched_replay_agg(hw);
5442 }
5443
5444 /**
5445 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5446 * @hw: ptr to the hardware info
5447 * @reg: offset of 64 bit HW register to read from
5448 * @prev_stat_loaded: bool to specify if previous stats are loaded
5449 * @prev_stat: ptr to previous loaded stat value
5450 * @cur_stat: ptr to current stat value
5451 */
5452 void
5453 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5454 u64 *prev_stat, u64 *cur_stat)
5455 {
5456 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5457
5458 /* device stats are not reset at PFR, so they likely will not be zeroed
5459 * when the driver starts. Thus, save the value from the first read
5460 * without adding to the statistic value so that we report stats which
5461 * count up from zero.
5462 */
5463 if (!prev_stat_loaded) {
5464 *prev_stat = new_data;
5465 return;
5466 }
5467
5468 /* Calculate the difference between the new and old values, and then
5469 * add it to the software stat value.
5470 */
5471 if (new_data >= *prev_stat)
5472 *cur_stat += new_data - *prev_stat;
5473 else
5474 /* to manage the potential roll-over */
5475 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5476
5477 /* Update the previously stored value to prepare for next read */
5478 *prev_stat = new_data;
5479 }
5480
5481 /**
5482 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5483 * @hw: ptr to the hardware info
5484 * @reg: offset of HW register to read from
5485 * @prev_stat_loaded: bool to specify if previous stats are loaded
5486 * @prev_stat: ptr to previous loaded stat value
5487 * @cur_stat: ptr to current stat value
5488 */
5489 void
5490 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5491 u64 *prev_stat, u64 *cur_stat)
5492 {
5493 u32 new_data;
5494
5495 new_data = rd32(hw, reg);
5496
5497 /* device stats are not reset at PFR, so they likely will not be zeroed
5498 * when the driver starts. Thus, save the value from the first read
5499 * without adding to the statistic value so that we report stats which
5500 * count up from zero.
5501 */
5502 if (!prev_stat_loaded) {
5503 *prev_stat = new_data;
5504 return;
5505 }
5506
5507 /* Calculate the difference between the new and old values, and then
5508 * add it to the software stat value.
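 * For example, prev_stat = 0xFFFFFFF0 and new_data = 0x00000010 yields
 * (0x10 + BIT_ULL(32)) - 0xFFFFFFF0 = 0x20, the correct delta across the
 * 32-bit roll-over.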

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] - data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
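
/*
 * Usage sketch for ice_aq_read_i2c() (hypothetical addresses, not part of
 * this file): reading two bytes from a device on the link topology could
 * look like the following; the topo_addr contents, bus address 0x50 and
 * offset 0x14 are placeholders that depend entirely on the board's netlist:
 *
 *	struct ice_aqc_link_topo_addr topo = {};
 *	u8 bytes[2];
 *	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, sizeof(bytes));
 *	int err = ice_aq_read_i2c(hw, topo, 0x50, cpu_to_le16(0x14), params,
 *				  bytes, NULL);
 */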

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *			    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0 - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given minimum version
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
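
/*
 * Comparison examples for ice_is_fw_api_min_ver() (illustrative): against a
 * required minimum of 1.5.3, a reported API of 2.0.0, 1.6.0 or 1.5.3 passes,
 * while 1.5.2 and 1.4.9 fail. A higher major always passes; an equal major
 * requires minor/patch to be at least the given values.
 */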

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy types (low).\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy types (high).\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
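
/*
 * Worked example for the PHY type word assembly in
 * ice_get_link_default_override() above (illustrative values, assuming
 * ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS covers one 64-bit mask): each mask is
 * stored in the NVM as 16-bit words, least significant word first, so words
 * of 0x0004, 0x0000, 0x0100, 0x0000 accumulate as
 *
 *	0x0004 << 0 | 0x0000 << 16 | 0x0100 << 32 | 0x0000 << 48
 *	= 0x0000010000000004
 */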

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
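
/*
 * Usage sketch (hypothetical caller, not part of this file): the LLDP Rx
 * filter helper above is typically gated on the firmware capability check,
 * e.g. when redirecting LLDP frames to a given VSI:
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		err = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 */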

/* Each of the indexes into the following array matches the speed of a return
 * value from the list of AQ returned speeds like the range:
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15) and maps to BIT(14) in this
 * array. The link_speed returned by the firmware is a 16-bit value and the
 * array is indexed by [fls(speed) - 1]; out-of-range indexes are rejected by
 * ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
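
/*
 * Worked example for the table lookup above (illustrative): in the AQ speed
 * encoding used by this table, 25G is reported as BIT(7), so
 *
 *	u32 speed = ice_get_link_speed(fls(ICE_AQ_LINK_SPEED_25GB) - 1);
 *
 * computes fls(BIT(7)) - 1 = 7 and yields SPEED_25000; any index at or
 * beyond ARRAY_SIZE(ice_aq_to_link_speed) returns 0.
 */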