1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018-2023, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice_sched.h" 6 #include "ice_adminq_cmd.h" 7 #include "ice_flow.h" 8 #include "ice_ptp_hw.h" 9 10 #define ICE_PF_RESET_WAIT_COUNT 300 11 #define ICE_MAX_NETLIST_SIZE 10 12 13 static const char * const ice_link_mode_str_low[] = { 14 [0] = "100BASE_TX", 15 [1] = "100M_SGMII", 16 [2] = "1000BASE_T", 17 [3] = "1000BASE_SX", 18 [4] = "1000BASE_LX", 19 [5] = "1000BASE_KX", 20 [6] = "1G_SGMII", 21 [7] = "2500BASE_T", 22 [8] = "2500BASE_X", 23 [9] = "2500BASE_KX", 24 [10] = "5GBASE_T", 25 [11] = "5GBASE_KR", 26 [12] = "10GBASE_T", 27 [13] = "10G_SFI_DA", 28 [14] = "10GBASE_SR", 29 [15] = "10GBASE_LR", 30 [16] = "10GBASE_KR_CR1", 31 [17] = "10G_SFI_AOC_ACC", 32 [18] = "10G_SFI_C2C", 33 [19] = "25GBASE_T", 34 [20] = "25GBASE_CR", 35 [21] = "25GBASE_CR_S", 36 [22] = "25GBASE_CR1", 37 [23] = "25GBASE_SR", 38 [24] = "25GBASE_LR", 39 [25] = "25GBASE_KR", 40 [26] = "25GBASE_KR_S", 41 [27] = "25GBASE_KR1", 42 [28] = "25G_AUI_AOC_ACC", 43 [29] = "25G_AUI_C2C", 44 [30] = "40GBASE_CR4", 45 [31] = "40GBASE_SR4", 46 [32] = "40GBASE_LR4", 47 [33] = "40GBASE_KR4", 48 [34] = "40G_XLAUI_AOC_ACC", 49 [35] = "40G_XLAUI", 50 [36] = "50GBASE_CR2", 51 [37] = "50GBASE_SR2", 52 [38] = "50GBASE_LR2", 53 [39] = "50GBASE_KR2", 54 [40] = "50G_LAUI2_AOC_ACC", 55 [41] = "50G_LAUI2", 56 [42] = "50G_AUI2_AOC_ACC", 57 [43] = "50G_AUI2", 58 [44] = "50GBASE_CP", 59 [45] = "50GBASE_SR", 60 [46] = "50GBASE_FR", 61 [47] = "50GBASE_LR", 62 [48] = "50GBASE_KR_PAM4", 63 [49] = "50G_AUI1_AOC_ACC", 64 [50] = "50G_AUI1", 65 [51] = "100GBASE_CR4", 66 [52] = "100GBASE_SR4", 67 [53] = "100GBASE_LR4", 68 [54] = "100GBASE_KR4", 69 [55] = "100G_CAUI4_AOC_ACC", 70 [56] = "100G_CAUI4", 71 [57] = "100G_AUI4_AOC_ACC", 72 [58] = "100G_AUI4", 73 [59] = "100GBASE_CR_PAM4", 74 [60] = "100GBASE_KR_PAM4", 75 [61] = "100GBASE_CP2", 76 [62] = "100GBASE_SR2", 77 [63] = "100GBASE_DR", 78 }; 79 80 static 
const char * const ice_link_mode_str_high[] = { 81 [0] = "100GBASE_KR2_PAM4", 82 [1] = "100G_CAUI2_AOC_ACC", 83 [2] = "100G_CAUI2", 84 [3] = "100G_AUI2_AOC_ACC", 85 [4] = "100G_AUI2", 86 }; 87 88 /** 89 * ice_dump_phy_type - helper function to dump phy_type 90 * @hw: pointer to the HW structure 91 * @low: 64 bit value for phy_type_low 92 * @high: 64 bit value for phy_type_high 93 * @prefix: prefix string to differentiate multiple dumps 94 */ 95 static void 96 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix) 97 { 98 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low); 99 100 for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) { 101 if (low & BIT_ULL(i)) 102 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", 103 prefix, i, ice_link_mode_str_low[i]); 104 } 105 106 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high); 107 108 for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) { 109 if (high & BIT_ULL(i)) 110 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", 111 prefix, i, ice_link_mode_str_high[i]); 112 } 113 } 114 115 /** 116 * ice_set_mac_type - Sets MAC type 117 * @hw: pointer to the HW structure 118 * 119 * This function sets the MAC type of the adapter based on the 120 * vendor ID and device ID stored in the HW structure. 
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	/* Only Intel parts are supported by this driver */
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	/* E822/E823 family parts all map to the generic MAC type */
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E830_BACKPLANE:
	case ICE_DEV_ID_E830_QSFP56:
	case ICE_DEV_ID_E830_SFP:
	case ICE_DEV_ID_E830_SFP_DD:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		/* Unrecognized device: record UNKNOWN but do not fail;
		 * callers treat this as a soft condition.
		 */
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	/* E810T designs are identified by specific (device ID,
	 * subsystem device ID) pairs rather than a dedicated device ID.
	 */
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	/* Direct (no-buffer) AQ command 0x02A4 */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	/* Caller's buffer must be able to hold at least one response entry */
	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	/* FW writes the response entries into @buf and flags into the
	 * command descriptor itself.
	 */
	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	/* DFLT_CFG report mode requires FW support; reject early if absent */
	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	/* Pick a log prefix so multiple dumps with different report modes
	 * can be told apart.
	 */
	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	/* Cache topology-with-media caps in the port info for later use */
	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	/* Query in the PORT context */
	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	/* Any AQ failure is collapsed to -EINTR for callers */
	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	/* Both outputs are optional; fill only what the caller asked for */
	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	/* Linear scan over netlist indices, bounded by ICE_MAX_NETLIST_SIZE */
	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 *
 * Maps the currently reported phy_type_low/phy_type_high bits to a media
 * type (fiber, BASE-T, DA, backplane). Returns ICE_MEDIA_UNKNOWN when the
 * PHY type is ambiguous or unrecognized.
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		/* AUI/xLAUI types: DA if a media cage exists, otherwise fall
		 * through to the backplane cases below.
		 */
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns datalength for the Get Link Status AQ command, which is bigger for
 * newer adapter families handled by ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ?
ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	/* media type is derived from the phy_type just stored above */
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	/* FW echoes the effective LSE state back in the response descriptor */
	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	/* E830 and E800 families expose these values in different registers */
	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	/* Unwind the allocation if default recipe init fails */
	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	/* Free the VSI list map entries */
	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		/* Advanced rule entries carry a separately-allocated lkups
		 * array; basic filter entries do not.
		 */
		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* FW logging init failure is logged but not fatal */
	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA,
PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	/* port_info may survive a rebuild; allocate only if absent */
	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities; a failure here
	 * is only warned about, init continues.
	 */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

/* Unwind labels below run in reverse order of the setup steps above */
err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
1128 * 1129 * This should be called only during nominal operation, not as a result of 1130 * ice_init_hw() failing since ice_init_hw() will take care of unrolling 1131 * applicable initializations if it fails for any reason. 1132 */ 1133 void ice_deinit_hw(struct ice_hw *hw) 1134 { 1135 ice_free_fd_res_cntr(hw, hw->fd_ctr_base); 1136 ice_cleanup_fltr_mgmt_struct(hw); 1137 1138 ice_sched_cleanup_all(hw); 1139 ice_sched_clear_agg(hw); 1140 ice_free_seg(hw); 1141 ice_free_hw_tbls(hw); 1142 mutex_destroy(&hw->tnl_lock); 1143 1144 ice_fwlog_deinit(hw); 1145 ice_destroy_all_ctrlq(hw); 1146 1147 /* Clear VSI contexts if not already cleared */ 1148 ice_clear_all_vsi_ctx(hw); 1149 } 1150 1151 /** 1152 * ice_check_reset - Check to see if a global reset is complete 1153 * @hw: pointer to the hardware structure 1154 */ 1155 int ice_check_reset(struct ice_hw *hw) 1156 { 1157 u32 cnt, reg = 0, grst_timeout, uld_mask; 1158 1159 /* Poll for Device Active state in case a recent CORER, GLOBR, 1160 * or EMPR has occurred. The grst delay value is in 100ms units. 1161 * Add 1sec for outstanding AQ commands that can take a long time. 1162 */ 1163 grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M, 1164 rd32(hw, GLGEN_RSTCTL)) + 10; 1165 1166 for (cnt = 0; cnt < grst_timeout; cnt++) { 1167 mdelay(100); 1168 reg = rd32(hw, GLGEN_RSTAT); 1169 if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) 1170 break; 1171 } 1172 1173 if (cnt == grst_timeout) { 1174 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); 1175 return -EIO; 1176 } 1177 1178 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ 1179 GLNVM_ULD_PCIER_DONE_1_M |\ 1180 GLNVM_ULD_CORER_DONE_M |\ 1181 GLNVM_ULD_GLOBR_DONE_M |\ 1182 GLNVM_ULD_POR_DONE_M |\ 1183 GLNVM_ULD_POR_DONE_1_M |\ 1184 GLNVM_ULD_PCIER_DONE_2_M) 1185 1186 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ? 
1187 GLNVM_ULD_PE_DONE_M : 0); 1188 1189 /* Device is Active; check Global Reset processes are done */ 1190 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 1191 reg = rd32(hw, GLNVM_ULD) & uld_mask; 1192 if (reg == uld_mask) { 1193 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); 1194 break; 1195 } 1196 mdelay(10); 1197 } 1198 1199 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 1200 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", 1201 reg); 1202 return -EIO; 1203 } 1204 1205 return 0; 1206 } 1207 1208 /** 1209 * ice_pf_reset - Reset the PF 1210 * @hw: pointer to the hardware structure 1211 * 1212 * If a global reset has been triggered, this function checks 1213 * for its completion and then issues the PF reset 1214 */ 1215 static int ice_pf_reset(struct ice_hw *hw) 1216 { 1217 u32 cnt, reg; 1218 1219 /* If at function entry a global reset was already in progress, i.e. 1220 * state is not 'device active' or any of the reset done bits are not 1221 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the 1222 * global reset is done. 1223 */ 1224 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || 1225 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { 1226 /* poll on global reset currently in progress until done */ 1227 if (ice_check_reset(hw)) 1228 return -EIO; 1229 1230 return 0; 1231 } 1232 1233 /* Reset the PF */ 1234 reg = rd32(hw, PFGEN_CTRL); 1235 1236 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); 1237 1238 /* Wait for the PFR to complete. The wait time is the global config lock 1239 * timeout plus the PFR timeout which will account for a possible reset 1240 * that is occurring during a download package operation. 
1241 */ 1242 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + 1243 ICE_PF_RESET_WAIT_COUNT; cnt++) { 1244 reg = rd32(hw, PFGEN_CTRL); 1245 if (!(reg & PFGEN_CTRL_PFSWR_M)) 1246 break; 1247 1248 mdelay(1); 1249 } 1250 1251 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 1252 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); 1253 return -EIO; 1254 } 1255 1256 return 0; 1257 } 1258 1259 /** 1260 * ice_reset - Perform different types of reset 1261 * @hw: pointer to the hardware structure 1262 * @req: reset request 1263 * 1264 * This function triggers a reset as specified by the req parameter. 1265 * 1266 * Note: 1267 * If anything other than a PF reset is triggered, PXE mode is restored. 1268 * This has to be cleared using ice_clear_pxe_mode again, once the AQ 1269 * interface has been restored in the rebuild flow. 1270 */ 1271 int ice_reset(struct ice_hw *hw, enum ice_reset_req req) 1272 { 1273 u32 val = 0; 1274 1275 switch (req) { 1276 case ICE_RESET_PFR: 1277 return ice_pf_reset(hw); 1278 case ICE_RESET_CORER: 1279 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); 1280 val = GLGEN_RTRIG_CORER_M; 1281 break; 1282 case ICE_RESET_GLOBR: 1283 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); 1284 val = GLGEN_RTRIG_GLOBR_M; 1285 break; 1286 default: 1287 return -EINVAL; 1288 } 1289 1290 val |= rd32(hw, GLGEN_RTRIG); 1291 wr32(hw, GLGEN_RTRIG, val); 1292 ice_flush(hw); 1293 1294 /* wait for the FW to be ready */ 1295 return ice_check_reset(hw); 1296 } 1297 1298 /** 1299 * ice_copy_rxq_ctx_to_hw 1300 * @hw: pointer to the hardware structure 1301 * @ice_rxq_ctx: pointer to the rxq context 1302 * @rxq_index: the index of the Rx queue 1303 * 1304 * Copies rxq context from dense structure to HW register space 1305 */ 1306 static int 1307 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) 1308 { 1309 u8 i; 1310 1311 if (!ice_rxq_ctx) 1312 return -EINVAL; 1313 1314 if (rxq_index > QRX_CTRL_MAX_INDEX) 1315 return -EINVAL; 1316 1317 /* Copy 
each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context
 * Field layout of the dense HW context; widths/offsets are in bits and must
 * match the hardware definition exactly — do not reorder.
 */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	/* force descriptor prefetch on, per the function comment above */
	rlan_ctx->prefena = 1;

	/* NOTE(review): ice_set_ctx() return value is ignored here — confirm
	 * packing cannot fail for this table before relying on the context.
	 */
	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/* LAN Tx Queue Context
 * Same bit-exact layout rules as ice_rlan_ctx_info above.
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 *
 * For a read (in->opcode == 0) the result is returned in in->data via the
 * completion; for a write the in->data payload is sent in the request.
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but
not all) types 1475 * of AQ commands from being sent to FW 1476 */ 1477 DEFINE_MUTEX(ice_global_cfg_lock_sw); 1478 1479 /** 1480 * ice_should_retry_sq_send_cmd 1481 * @opcode: AQ opcode 1482 * 1483 * Decide if we should retry the send command routine for the ATQ, depending 1484 * on the opcode. 1485 */ 1486 static bool ice_should_retry_sq_send_cmd(u16 opcode) 1487 { 1488 switch (opcode) { 1489 case ice_aqc_opc_get_link_topo: 1490 case ice_aqc_opc_lldp_stop: 1491 case ice_aqc_opc_lldp_start: 1492 case ice_aqc_opc_lldp_filter_ctrl: 1493 return true; 1494 } 1495 1496 return false; 1497 } 1498 1499 /** 1500 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) 1501 * @hw: pointer to the HW struct 1502 * @cq: pointer to the specific Control queue 1503 * @desc: prefilled descriptor describing the command 1504 * @buf: buffer to use for indirect commands (or NULL for direct commands) 1505 * @buf_size: size of buffer for indirect commands (or 0 for direct commands) 1506 * @cd: pointer to command details structure 1507 * 1508 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin 1509 * Queue if the EBUSY AQ error is returned. 1510 */ 1511 static int 1512 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, 1513 struct ice_aq_desc *desc, void *buf, u16 buf_size, 1514 struct ice_sq_cd *cd) 1515 { 1516 struct ice_aq_desc desc_cpy; 1517 bool is_cmd_for_retry; 1518 u8 idx = 0; 1519 u16 opcode; 1520 int status; 1521 1522 opcode = le16_to_cpu(desc->opcode); 1523 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); 1524 memset(&desc_cpy, 0, sizeof(desc_cpy)); 1525 1526 if (is_cmd_for_retry) { 1527 /* All retryable cmds are direct, without buf. 
*/ 1528 WARN_ON(buf); 1529 1530 memcpy(&desc_cpy, desc, sizeof(desc_cpy)); 1531 } 1532 1533 do { 1534 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); 1535 1536 if (!is_cmd_for_retry || !status || 1537 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) 1538 break; 1539 1540 memcpy(desc, &desc_cpy, sizeof(desc_cpy)); 1541 1542 msleep(ICE_SQ_SEND_DELAY_TIME_MS); 1543 1544 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); 1545 1546 return status; 1547 } 1548 1549 /** 1550 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue 1551 * @hw: pointer to the HW struct 1552 * @desc: descriptor describing the command 1553 * @buf: buffer to use for indirect commands (NULL for direct commands) 1554 * @buf_size: size of buffer for indirect commands (0 for direct commands) 1555 * @cd: pointer to command details structure 1556 * 1557 * Helper function to send FW Admin Queue commands to the FW Admin Queue. 1558 */ 1559 int 1560 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, 1561 u16 buf_size, struct ice_sq_cd *cd) 1562 { 1563 struct ice_aqc_req_res *cmd = &desc->params.res_owner; 1564 bool lock_acquired = false; 1565 int status; 1566 1567 /* When a package download is in process (i.e. when the firmware's 1568 * Global Configuration Lock resource is held), only the Download 1569 * Package, Get Version, Get Package Info List, Upload Section, 1570 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters, 1571 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get 1572 * Recipes to Profile Association, and Release Resource (with resource 1573 * ID set to Global Config Lock) AdminQ commands are allowed; all others 1574 * must block until the package download completes and the Global Config 1575 * Lock is released. See also ice_acquire_global_cfg_lock(). 
1576 */ 1577 switch (le16_to_cpu(desc->opcode)) { 1578 case ice_aqc_opc_download_pkg: 1579 case ice_aqc_opc_get_pkg_info_list: 1580 case ice_aqc_opc_get_ver: 1581 case ice_aqc_opc_upload_section: 1582 case ice_aqc_opc_update_pkg: 1583 case ice_aqc_opc_set_port_params: 1584 case ice_aqc_opc_get_vlan_mode_parameters: 1585 case ice_aqc_opc_set_vlan_mode_parameters: 1586 case ice_aqc_opc_add_recipe: 1587 case ice_aqc_opc_recipe_to_profile: 1588 case ice_aqc_opc_get_recipe: 1589 case ice_aqc_opc_get_recipe_to_profile: 1590 break; 1591 case ice_aqc_opc_release_res: 1592 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) 1593 break; 1594 fallthrough; 1595 default: 1596 mutex_lock(&ice_global_cfg_lock_sw); 1597 lock_acquired = true; 1598 break; 1599 } 1600 1601 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); 1602 if (lock_acquired) 1603 mutex_unlock(&ice_global_cfg_lock_sw); 1604 1605 return status; 1606 } 1607 1608 /** 1609 * ice_aq_get_fw_ver 1610 * @hw: pointer to the HW struct 1611 * @cd: pointer to command details structure or NULL 1612 * 1613 * Get the firmware version (0x0001) from the admin queue commands 1614 */ 1615 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) 1616 { 1617 struct ice_aqc_get_ver *resp; 1618 struct ice_aq_desc desc; 1619 int status; 1620 1621 resp = &desc.params.get_ver; 1622 1623 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); 1624 1625 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1626 1627 if (!status) { 1628 hw->fw_branch = resp->fw_branch; 1629 hw->fw_maj_ver = resp->fw_major; 1630 hw->fw_min_ver = resp->fw_minor; 1631 hw->fw_patch = resp->fw_patch; 1632 hw->fw_build = le32_to_cpu(resp->fw_build); 1633 hw->api_branch = resp->api_branch; 1634 hw->api_maj_ver = resp->api_major; 1635 hw->api_min_ver = resp->api_minor; 1636 hw->api_patch = resp->api_patch; 1637 } 1638 1639 return status; 1640 } 1641 1642 /** 1643 * ice_aq_send_driver_ver 1644 * @hw: pointer to the HW struct 1645 * 
@dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	/* send only the printable-ASCII, NUL-terminated prefix of the
	 * driver string as the indirect buffer
	 */
	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0 -          acquired lock, and can perform download package
 * 2) -EIO -       did not get lock, driver should fail to load
 * 3) -EALREADY -  did not get lock, but another driver has
 *                 successfully downloaded the package; the driver does
 *                 not have to download the package and can continue
 *                 loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	/* *timeout is an out parameter from here on */
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == -EALREADY)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != -EALREADY)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == -EALREADY) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	unsigned long timeout;
	int status;

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
	do {
		status = ice_aq_release_res(hw, res, 0, NULL);
		if (status != -EIO)
			break;
		usleep_range(1000, 2000);
	} while (time_before(jiffies, timeout));
}

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
int ice_aq_alloc_free_res(struct ice_hw *hw,
			  struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
			  enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	/* buffer must hold at least one element */
	if (!buf || buf_size < flex_array_size(buf, elem, 1))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(1);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
	if (status)
		goto ice_alloc_res_exit;

	/* copy the allocated resource IDs back to the caller's array */
	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
2004 */ 2005 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2006 { 2007 u8 funcs; 2008 2009 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2010 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2011 ICE_CAPS_VALID_FUNCS_M); 2012 2013 if (!funcs) 2014 return 0; 2015 2016 return max / funcs; 2017 } 2018 2019 /** 2020 * ice_parse_common_caps - parse common device/function capabilities 2021 * @hw: pointer to the HW struct 2022 * @caps: pointer to common capabilities structure 2023 * @elem: the capability element to parse 2024 * @prefix: message prefix for tracing capabilities 2025 * 2026 * Given a capability element, extract relevant details into the common 2027 * capability structure. 2028 * 2029 * Returns: true if the capability matches one of the common capability ids, 2030 * false otherwise. 2031 */ 2032 static bool 2033 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2034 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2035 { 2036 u32 logical_id = le32_to_cpu(elem->logical_id); 2037 u32 phys_id = le32_to_cpu(elem->phys_id); 2038 u32 number = le32_to_cpu(elem->number); 2039 u16 cap = le16_to_cpu(elem->cap); 2040 bool found = true; 2041 2042 switch (cap) { 2043 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2044 caps->valid_functions = number; 2045 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2046 caps->valid_functions); 2047 break; 2048 case ICE_AQC_CAPS_SRIOV: 2049 caps->sr_iov_1_1 = (number == 1); 2050 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2051 caps->sr_iov_1_1); 2052 break; 2053 case ICE_AQC_CAPS_DCB: 2054 caps->dcb = (number == 1); 2055 caps->active_tc_bitmap = logical_id; 2056 caps->maxtc = phys_id; 2057 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2058 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2059 caps->active_tc_bitmap); 2060 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2061 break; 2062 case 
ICE_AQC_CAPS_RSS: 2063 caps->rss_table_size = number; 2064 caps->rss_table_entry_width = logical_id; 2065 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2066 caps->rss_table_size); 2067 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2068 caps->rss_table_entry_width); 2069 break; 2070 case ICE_AQC_CAPS_RXQS: 2071 caps->num_rxq = number; 2072 caps->rxq_first_id = phys_id; 2073 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2074 caps->num_rxq); 2075 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2076 caps->rxq_first_id); 2077 break; 2078 case ICE_AQC_CAPS_TXQS: 2079 caps->num_txq = number; 2080 caps->txq_first_id = phys_id; 2081 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2082 caps->num_txq); 2083 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2084 caps->txq_first_id); 2085 break; 2086 case ICE_AQC_CAPS_MSIX: 2087 caps->num_msix_vectors = number; 2088 caps->msix_vector_first_id = phys_id; 2089 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2090 caps->num_msix_vectors); 2091 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2092 caps->msix_vector_first_id); 2093 break; 2094 case ICE_AQC_CAPS_PENDING_NVM_VER: 2095 caps->nvm_update_pending_nvm = true; 2096 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2097 break; 2098 case ICE_AQC_CAPS_PENDING_OROM_VER: 2099 caps->nvm_update_pending_orom = true; 2100 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2101 break; 2102 case ICE_AQC_CAPS_PENDING_NET_VER: 2103 caps->nvm_update_pending_netlist = true; 2104 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2105 break; 2106 case ICE_AQC_CAPS_NVM_MGMT: 2107 caps->nvm_unified_update = 2108 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2109 true : false; 2110 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2111 caps->nvm_unified_update); 2112 break; 2113 case ICE_AQC_CAPS_RDMA: 2114 caps->rdma = (number == 1); 2115 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2116 break; 2117 case ICE_AQC_CAPS_MAX_MTU: 2118 caps->max_mtu = number; 2119 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2120 prefix, caps->max_mtu); 2121 break; 2122 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2123 caps->pcie_reset_avoidance = (number > 0); 2124 ice_debug(hw, ICE_DBG_INIT, 2125 "%s: pcie_reset_avoidance = %d\n", prefix, 2126 caps->pcie_reset_avoidance); 2127 break; 2128 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2129 caps->reset_restrict_support = (number == 1); 2130 ice_debug(hw, ICE_DBG_INIT, 2131 "%s: reset_restrict_support = %d\n", prefix, 2132 caps->reset_restrict_support); 2133 break; 2134 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2135 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2136 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2137 prefix, caps->roce_lag); 2138 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2139 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2140 prefix, caps->sriov_lag); 2141 break; 2142 default: 2143 /* Not one of the recognized common capabilities */ 2144 found = false; 2145 } 2146 2147 return found; 2148 } 2149 2150 /** 2151 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2152 * @hw: pointer to the HW structure 2153 * @caps: pointer to capabilities structure to fix 2154 * 2155 * Re-calculate the capabilities that are dependent on the number of physical 2156 * ports; i.e. some features are not supported or function differently on 2157 * devices with more than 4 ports. 
2158 */ 2159 static void 2160 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2161 { 2162 /* This assumes device capabilities are always scanned before function 2163 * capabilities during the initialization flow. 2164 */ 2165 if (hw->dev_caps.num_funcs > 4) { 2166 /* Max 4 TCs per port */ 2167 caps->maxtc = 4; 2168 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2169 caps->maxtc); 2170 if (caps->rdma) { 2171 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2172 caps->rdma = 0; 2173 } 2174 2175 /* print message only when processing device capabilities 2176 * during initialization. 2177 */ 2178 if (caps == &hw->dev_caps.common_cap) 2179 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2180 } 2181 } 2182 2183 /** 2184 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2185 * @hw: pointer to the HW struct 2186 * @func_p: pointer to function capabilities structure 2187 * @cap: pointer to the capability element to parse 2188 * 2189 * Extract function capabilities for ICE_AQC_CAPS_VF. 2190 */ 2191 static void 2192 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2193 struct ice_aqc_list_caps_elem *cap) 2194 { 2195 u32 logical_id = le32_to_cpu(cap->logical_id); 2196 u32 number = le32_to_cpu(cap->number); 2197 2198 func_p->num_allocd_vfs = number; 2199 func_p->vf_base_id = logical_id; 2200 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2201 func_p->num_allocd_vfs); 2202 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2203 func_p->vf_base_id); 2204 } 2205 2206 /** 2207 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2208 * @hw: pointer to the HW struct 2209 * @func_p: pointer to function capabilities structure 2210 * @cap: pointer to the capability element to parse 2211 * 2212 * Extract function capabilities for ICE_AQC_CAPS_VSI. 
2213 */ 2214 static void 2215 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2216 struct ice_aqc_list_caps_elem *cap) 2217 { 2218 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2219 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2220 le32_to_cpu(cap->number)); 2221 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2222 func_p->guar_num_vsi); 2223 } 2224 2225 /** 2226 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2227 * @hw: pointer to the HW struct 2228 * @func_p: pointer to function capabilities structure 2229 * @cap: pointer to the capability element to parse 2230 * 2231 * Extract function capabilities for ICE_AQC_CAPS_1588. 2232 */ 2233 static void 2234 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2235 struct ice_aqc_list_caps_elem *cap) 2236 { 2237 struct ice_ts_func_info *info = &func_p->ts_func_info; 2238 u32 number = le32_to_cpu(cap->number); 2239 2240 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2241 func_p->common_cap.ieee_1588 = info->ena; 2242 2243 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2244 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2245 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2246 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2247 2248 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); 2249 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2250 2251 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2252 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2253 } else { 2254 /* Unknown clock frequency, so assume a (probably incorrect) 2255 * default to avoid out-of-bounds look ups of frequency 2256 * related information. 
2257 */ 2258 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2259 info->clk_freq); 2260 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2261 } 2262 2263 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2264 func_p->common_cap.ieee_1588); 2265 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2266 info->src_tmr_owned); 2267 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2268 info->tmr_ena); 2269 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2270 info->tmr_index_owned); 2271 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2272 info->tmr_index_assoc); 2273 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2274 info->clk_freq); 2275 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2276 info->clk_src); 2277 } 2278 2279 /** 2280 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2281 * @hw: pointer to the HW struct 2282 * @func_p: pointer to function capabilities structure 2283 * 2284 * Extract function capabilities for ICE_AQC_CAPS_FD. 
2285 */ 2286 static void 2287 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2288 { 2289 u32 reg_val, gsize, bsize; 2290 2291 reg_val = rd32(hw, GLQF_FD_SIZE); 2292 switch (hw->mac_type) { 2293 case ICE_MAC_E830: 2294 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2295 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2296 break; 2297 case ICE_MAC_E810: 2298 default: 2299 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2300 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2301 } 2302 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2303 func_p->fd_fltr_best_effort = bsize; 2304 2305 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2306 func_p->fd_fltr_guar); 2307 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2308 func_p->fd_fltr_best_effort); 2309 } 2310 2311 /** 2312 * ice_parse_func_caps - Parse function capabilities 2313 * @hw: pointer to the HW struct 2314 * @func_p: pointer to function capabilities structure 2315 * @buf: buffer containing the function capability records 2316 * @cap_count: the number of capabilities 2317 * 2318 * Helper function to parse function (0x000A) capabilities list. For 2319 * capabilities shared between device and function, this relies on 2320 * ice_parse_common_caps. 2321 * 2322 * Loop through the list of provided capabilities and extract the relevant 2323 * data into the function capabilities structured. 
2324 */ 2325 static void 2326 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2327 void *buf, u32 cap_count) 2328 { 2329 struct ice_aqc_list_caps_elem *cap_resp; 2330 u32 i; 2331 2332 cap_resp = buf; 2333 2334 memset(func_p, 0, sizeof(*func_p)); 2335 2336 for (i = 0; i < cap_count; i++) { 2337 u16 cap = le16_to_cpu(cap_resp[i].cap); 2338 bool found; 2339 2340 found = ice_parse_common_caps(hw, &func_p->common_cap, 2341 &cap_resp[i], "func caps"); 2342 2343 switch (cap) { 2344 case ICE_AQC_CAPS_VF: 2345 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2346 break; 2347 case ICE_AQC_CAPS_VSI: 2348 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2349 break; 2350 case ICE_AQC_CAPS_1588: 2351 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2352 break; 2353 case ICE_AQC_CAPS_FD: 2354 ice_parse_fdir_func_caps(hw, func_p); 2355 break; 2356 default: 2357 /* Don't list common capabilities as unknown */ 2358 if (!found) 2359 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2360 i, cap); 2361 break; 2362 } 2363 } 2364 2365 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2366 } 2367 2368 /** 2369 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2370 * @hw: pointer to the HW struct 2371 * @dev_p: pointer to device capabilities structure 2372 * @cap: capability element to parse 2373 * 2374 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 
2375 */ 2376 static void 2377 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2378 struct ice_aqc_list_caps_elem *cap) 2379 { 2380 u32 number = le32_to_cpu(cap->number); 2381 2382 dev_p->num_funcs = hweight32(number); 2383 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2384 dev_p->num_funcs); 2385 } 2386 2387 /** 2388 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2389 * @hw: pointer to the HW struct 2390 * @dev_p: pointer to device capabilities structure 2391 * @cap: capability element to parse 2392 * 2393 * Parse ICE_AQC_CAPS_VF for device capabilities. 2394 */ 2395 static void 2396 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2397 struct ice_aqc_list_caps_elem *cap) 2398 { 2399 u32 number = le32_to_cpu(cap->number); 2400 2401 dev_p->num_vfs_exposed = number; 2402 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2403 dev_p->num_vfs_exposed); 2404 } 2405 2406 /** 2407 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2408 * @hw: pointer to the HW struct 2409 * @dev_p: pointer to device capabilities structure 2410 * @cap: capability element to parse 2411 * 2412 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2413 */ 2414 static void 2415 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2416 struct ice_aqc_list_caps_elem *cap) 2417 { 2418 u32 number = le32_to_cpu(cap->number); 2419 2420 dev_p->num_vsi_allocd_to_host = number; 2421 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2422 dev_p->num_vsi_allocd_to_host); 2423 } 2424 2425 /** 2426 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2427 * @hw: pointer to the HW struct 2428 * @dev_p: pointer to device capabilities structure 2429 * @cap: capability element to parse 2430 * 2431 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
2432 */ 2433 static void 2434 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2435 struct ice_aqc_list_caps_elem *cap) 2436 { 2437 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2438 u32 logical_id = le32_to_cpu(cap->logical_id); 2439 u32 phys_id = le32_to_cpu(cap->phys_id); 2440 u32 number = le32_to_cpu(cap->number); 2441 2442 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2443 dev_p->common_cap.ieee_1588 = info->ena; 2444 2445 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2446 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2447 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2448 2449 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); 2450 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2451 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2452 2453 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2454 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); 2455 2456 info->ena_ports = logical_id; 2457 info->tmr_own_map = phys_id; 2458 2459 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2460 dev_p->common_cap.ieee_1588); 2461 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2462 info->tmr0_owner); 2463 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2464 info->tmr0_owned); 2465 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2466 info->tmr0_ena); 2467 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2468 info->tmr1_owner); 2469 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2470 info->tmr1_owned); 2471 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2472 info->tmr1_ena); 2473 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2474 info->ts_ll_read); 2475 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", 2476 info->ts_ll_int_read); 2477 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2478 info->ena_ports); 2479 ice_debug(hw, ICE_DBG_INIT, "dev caps: 
tmr_own_map = %u\n", 2480 info->tmr_own_map); 2481 } 2482 2483 /** 2484 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2485 * @hw: pointer to the HW struct 2486 * @dev_p: pointer to device capabilities structure 2487 * @cap: capability element to parse 2488 * 2489 * Parse ICE_AQC_CAPS_FD for device capabilities. 2490 */ 2491 static void 2492 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2493 struct ice_aqc_list_caps_elem *cap) 2494 { 2495 u32 number = le32_to_cpu(cap->number); 2496 2497 dev_p->num_flow_director_fltr = number; 2498 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2499 dev_p->num_flow_director_fltr); 2500 } 2501 2502 /** 2503 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2504 * @hw: pointer to the HW struct 2505 * @dev_p: pointer to device capabilities structure 2506 * @cap: capability element to parse 2507 * 2508 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2509 * enabled sensors. 2510 */ 2511 static void 2512 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2513 struct ice_aqc_list_caps_elem *cap) 2514 { 2515 dev_p->supported_sensors = le32_to_cpu(cap->number); 2516 2517 ice_debug(hw, ICE_DBG_INIT, 2518 "dev caps: supported sensors (bitmap) = 0x%x\n", 2519 dev_p->supported_sensors); 2520 } 2521 2522 /** 2523 * ice_parse_dev_caps - Parse device capabilities 2524 * @hw: pointer to the HW struct 2525 * @dev_p: pointer to device capabilities structure 2526 * @buf: buffer containing the device capability records 2527 * @cap_count: the number of capabilities 2528 * 2529 * Helper device to parse device (0x000B) capabilities list. For 2530 * capabilities shared between device and function, this relies on 2531 * ice_parse_common_caps. 2532 * 2533 * Loop through the list of provided capabilities and extract the relevant 2534 * data into the device capabilities structured. 
2535 */ 2536 static void 2537 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2538 void *buf, u32 cap_count) 2539 { 2540 struct ice_aqc_list_caps_elem *cap_resp; 2541 u32 i; 2542 2543 cap_resp = buf; 2544 2545 memset(dev_p, 0, sizeof(*dev_p)); 2546 2547 for (i = 0; i < cap_count; i++) { 2548 u16 cap = le16_to_cpu(cap_resp[i].cap); 2549 bool found; 2550 2551 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2552 &cap_resp[i], "dev caps"); 2553 2554 switch (cap) { 2555 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2556 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2557 break; 2558 case ICE_AQC_CAPS_VF: 2559 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2560 break; 2561 case ICE_AQC_CAPS_VSI: 2562 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2563 break; 2564 case ICE_AQC_CAPS_1588: 2565 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2566 break; 2567 case ICE_AQC_CAPS_FD: 2568 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2569 break; 2570 case ICE_AQC_CAPS_SENSOR_READING: 2571 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2572 break; 2573 default: 2574 /* Don't list common capabilities as unknown */ 2575 if (!found) 2576 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2577 i, cap); 2578 break; 2579 } 2580 } 2581 2582 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2583 } 2584 2585 /** 2586 * ice_is_pf_c827 - check if pf contains c827 phy 2587 * @hw: pointer to the hw struct 2588 */ 2589 bool ice_is_pf_c827(struct ice_hw *hw) 2590 { 2591 struct ice_aqc_get_link_topo cmd = {}; 2592 u8 node_part_number; 2593 u16 node_handle; 2594 int status; 2595 2596 if (hw->mac_type != ICE_MAC_E810) 2597 return false; 2598 2599 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2600 return true; 2601 2602 cmd.addr.topo_params.node_type_ctx = 2603 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2604 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2605 
cmd.addr.topo_params.index = 0; 2606 2607 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2608 &node_handle); 2609 2610 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2611 return false; 2612 2613 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2614 return true; 2615 2616 return false; 2617 } 2618 2619 /** 2620 * ice_is_phy_rclk_in_netlist 2621 * @hw: pointer to the hw struct 2622 * 2623 * Check if the PHY Recovered Clock device is present in the netlist 2624 */ 2625 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2626 { 2627 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2628 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2629 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2630 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2631 return false; 2632 2633 return true; 2634 } 2635 2636 /** 2637 * ice_is_clock_mux_in_netlist 2638 * @hw: pointer to the hw struct 2639 * 2640 * Check if the Clock Multiplexer device is present in the netlist 2641 */ 2642 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2643 { 2644 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2645 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2646 NULL)) 2647 return false; 2648 2649 return true; 2650 } 2651 2652 /** 2653 * ice_is_cgu_in_netlist - check for CGU presence 2654 * @hw: pointer to the hw struct 2655 * 2656 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2657 * Save the CGU part number in the hw structure for later use. 
2658 * Return: 2659 * * true - cgu is present 2660 * * false - cgu is not present 2661 */ 2662 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2663 { 2664 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2665 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2666 NULL)) { 2667 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2668 return true; 2669 } else if (!ice_find_netlist_node(hw, 2670 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2671 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2672 NULL)) { 2673 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2674 return true; 2675 } 2676 2677 return false; 2678 } 2679 2680 /** 2681 * ice_is_gps_in_netlist 2682 * @hw: pointer to the hw struct 2683 * 2684 * Check if the GPS generic device is present in the netlist 2685 */ 2686 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2687 { 2688 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2689 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2690 return false; 2691 2692 return true; 2693 } 2694 2695 /** 2696 * ice_aq_list_caps - query function/device capabilities 2697 * @hw: pointer to the HW struct 2698 * @buf: a buffer to hold the capabilities 2699 * @buf_size: size of the buffer 2700 * @cap_count: if not NULL, set to the number of capabilities reported 2701 * @opc: capabilities type to discover, device or function 2702 * @cd: pointer to command details structure or NULL 2703 * 2704 * Get the function (0x000A) or device (0x000B) capabilities description from 2705 * firmware and store it in the buffer. 2706 * 2707 * If the cap_count pointer is not NULL, then it is set to the number of 2708 * capabilities firmware will report. Note that if the buffer size is too 2709 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2710 * cap_count will still be updated in this case. 
 * It is recommended that the
 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
 * firmware could return) to avoid this.
 */
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	int status;

	/* cmd aliases the descriptor's parameter area */
	cmd = &desc.params.get_cap;

	/* only the function and device capability opcodes are accepted */
	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	/* report the firmware capability count even on failure; see the
	 * ICE_AQ_ERR_ENOMEM note in the kernel-doc above.
	 */
	if (cap_count)
		*cap_count = le32_to_cpu(cmd->count);

	return status;
}

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or an admin queue
 * error code.
 */
int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or an admin queue
 * error code.
 */
static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 *
 * Reduce the discovered capabilities to a safe-mode baseline (single Tx/Rx
 * queue and two MSI-X vectors per function) while preserving the fields
 * cached below, which stay valid regardless of mode.
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 *
 * Discover device capabilities first, then function capabilities.
 *
 * Return: 0 on success or the error from either discovery step.
 */
int ice_get_caps(struct ice_hw *hw)
{
	int status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
2897 */ 2898 int 2899 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2900 struct ice_sq_cd *cd) 2901 { 2902 struct ice_aqc_manage_mac_write *cmd; 2903 struct ice_aq_desc desc; 2904 2905 cmd = &desc.params.mac_write; 2906 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2907 2908 cmd->flags = flags; 2909 ether_addr_copy(cmd->mac_addr, mac_addr); 2910 2911 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2912 } 2913 2914 /** 2915 * ice_aq_clear_pxe_mode 2916 * @hw: pointer to the HW struct 2917 * 2918 * Tell the firmware that the driver is taking over from PXE (0x0110). 2919 */ 2920 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2921 { 2922 struct ice_aq_desc desc; 2923 2924 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2925 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2926 2927 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2928 } 2929 2930 /** 2931 * ice_clear_pxe_mode - clear pxe operations mode 2932 * @hw: pointer to the HW struct 2933 * 2934 * Make sure all PXE mode settings are cleared, including things 2935 * like descriptor fetch/write-back mode. 2936 */ 2937 void ice_clear_pxe_mode(struct ice_hw *hw) 2938 { 2939 if (ice_check_sq_alive(hw, &hw->adminq)) 2940 ice_aq_clear_pxe_mode(hw); 2941 } 2942 2943 /** 2944 * ice_aq_set_port_params - set physical port parameters. 
2945 * @pi: pointer to the port info struct 2946 * @double_vlan: if set double VLAN is enabled 2947 * @cd: pointer to command details structure or NULL 2948 * 2949 * Set Physical port parameters (0x0203) 2950 */ 2951 int 2952 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2953 struct ice_sq_cd *cd) 2954 2955 { 2956 struct ice_aqc_set_port_params *cmd; 2957 struct ice_hw *hw = pi->hw; 2958 struct ice_aq_desc desc; 2959 u16 cmd_flags = 0; 2960 2961 cmd = &desc.params.set_port_params; 2962 2963 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 2964 if (double_vlan) 2965 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 2966 cmd->cmd_flags = cpu_to_le16(cmd_flags); 2967 2968 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2969 } 2970 2971 /** 2972 * ice_is_100m_speed_supported 2973 * @hw: pointer to the HW struct 2974 * 2975 * returns true if 100M speeds are supported by the device, 2976 * false otherwise. 2977 */ 2978 bool ice_is_100m_speed_supported(struct ice_hw *hw) 2979 { 2980 switch (hw->device_id) { 2981 case ICE_DEV_ID_E822C_SGMII: 2982 case ICE_DEV_ID_E822L_SGMII: 2983 case ICE_DEV_ID_E823L_1GBE: 2984 case ICE_DEV_ID_E823C_SGMII: 2985 return true; 2986 default: 2987 return false; 2988 } 2989 } 2990 2991 /** 2992 * ice_get_link_speed_based_on_phy_type - returns link speed 2993 * @phy_type_low: lower part of phy_type 2994 * @phy_type_high: higher part of phy_type 2995 * 2996 * This helper function will convert an entry in PHY type structure 2997 * [phy_type_low, phy_type_high] to its corresponding link speed. 2998 * Note: In the structure of [phy_type_low, phy_type_high], there should 2999 * be one bit set, as this function will convert one PHY type to its 3000 * speed. 
 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	/* Map the single phy_type_low bit (if any) to its link speed. */
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* Map the single phy_type_high bit (if any) to its link speed. */
	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* A valid result requires exactly one of the two halves to have
	 * resolved to a known speed; anything else is ambiguous.
	 */
	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
 * link_speeds_bitmap include multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure will
 * present a certain link speed. This helper function will turn on bits
 * in [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type.
	 * Note: bits are only OR-ed in; the caller is responsible for
	 * clearing *phy_type_low / *phy_type_high beforehand if needed.
	 */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (!cfg)
		return -EINVAL;

	/* Ensure that only valid bits of cfg->caps can be turned on.
	 */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		/* Silently strip invalid bits rather than failing the call */
		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	/* An EMODE return from FW is treated as success here — presumably
	 * it means the config cannot take effect in the current port mode;
	 * NOTE(review): confirm against FW AQ error-code semantics.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	/* Cache the last config the user successfully applied */
	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
int ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	int status;

	if (!pi)
		return -EINVAL;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return -ENOMEM;

		/* The returned buffer is immediately freed; the call is made
		 * for its side effects — presumably ice_aq_get_phy_caps()
		 * caches topology/media capabilities in pi. NOTE(review):
		 * confirm against ice_aq_get_phy_caps() implementation.
		 */
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		/* Unknown cache modes are silently ignored */
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	/* Auto-FEC capability takes precedence over specific FEC options */
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return -EINVAL;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		/* ICE_FC_NONE and other modes leave both pause bits clear */
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit. (Up to retry_max * 100 ms total.)
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities matches PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy date from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct
 ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	/* Field-by-field copy; get-caps and set-cfg structures differ, so a
	 * memcpy is not possible here.
	 */
	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Newer FW reports its default config directly; older FW only
	 * reports topology/media capabilities.
	 */
	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	/* On older FW, a non-strict link-default-override TLV in the NVM can
	 * replace the auto FEC options.
	 */
	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero.
 As a
 * result of this call, link status reporting becomes enabled
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	/* Refresh from FW only when link-status reporting is requested;
	 * otherwise return the cached link state.
	 */
	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd =
&desc.params.set_event_mask; 3690 3691 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3692 3693 cmd->lport_num = port_num; 3694 3695 cmd->event_mask = cpu_to_le16(mask); 3696 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3697 } 3698 3699 /** 3700 * ice_aq_set_mac_loopback 3701 * @hw: pointer to the HW struct 3702 * @ena_lpbk: Enable or Disable loopback 3703 * @cd: pointer to command details structure or NULL 3704 * 3705 * Enable/disable loopback on a given port 3706 */ 3707 int 3708 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3709 { 3710 struct ice_aqc_set_mac_lb *cmd; 3711 struct ice_aq_desc desc; 3712 3713 cmd = &desc.params.set_mac_lb; 3714 3715 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3716 if (ena_lpbk) 3717 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3718 3719 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3720 } 3721 3722 /** 3723 * ice_aq_set_port_id_led 3724 * @pi: pointer to the port information 3725 * @is_orig_mode: is this LED set to original mode (by the net-list) 3726 * @cd: pointer to command details structure or NULL 3727 * 3728 * Set LED value for the given port (0x06e9) 3729 */ 3730 int 3731 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3732 struct ice_sq_cd *cd) 3733 { 3734 struct ice_aqc_set_port_id_led *cmd; 3735 struct ice_hw *hw = pi->hw; 3736 struct ice_aq_desc desc; 3737 3738 cmd = &desc.params.set_port_id_led; 3739 3740 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3741 3742 if (is_orig_mode) 3743 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3744 else 3745 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3746 3747 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3748 } 3749 3750 /** 3751 * ice_aq_get_port_options 3752 * @hw: pointer to the HW struct 3753 * @options: buffer for the resultant port options 3754 * @option_count: input - size of the buffer in port options structures, 3755 * output - number of returned port options 3756 
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
 *		 when PF owns more than 1 port it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		/* An active index beyond the reported count means FW
		 * returned inconsistent data.
		 */
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}

/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses port owned by the PF instead of lport,
 *		 when PF owns more than 1 port it must be true
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	/* Option index must fit in the AQ count field */
	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	u16 i2c_bus_addr;
	int status;

	/* Only 8-bit I2C offsets are supported; upper byte must be zero */
	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	/* bus_addr is an 8-bit I2C address; FW wants the 7-bit form */
	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
	if (write)
		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

/* Map an RSS LUT type to its size in bytes. */
static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
{
	switch (type) {
	case ICE_LUT_VSI:
		return ICE_LUT_VSI_SIZE;
	case ICE_LUT_GLOBAL:
		return ICE_LUT_GLOBAL_SIZE;
	case ICE_LUT_PF:
		return ICE_LUT_PF_SIZE;
	}
	WARN_ONCE(1, "incorrect type passed");
	return ICE_LUT_VSI_SIZE;
}

/* Map an RSS LUT size to the corresponding AQ descriptor flag. */
static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
{
	switch (size) {
	case ICE_LUT_VSI_SIZE:
		return ICE_AQC_LUT_SIZE_SMALL;
	case ICE_LUT_GLOBAL_SIZE:
		return ICE_AQC_LUT_SIZE_512;
	case ICE_LUT_PF_SIZE:
		return ICE_AQC_LUT_SIZE_2K;
	}
	WARN_ONCE(1, "incorrect size passed");
	return 0;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw,
			 struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
	enum ice_lut_type lut_type = params->lut_type;
	struct ice_aqc_get_set_rss_lut *desc_params;
	enum ice_aqc_lut_flags flags;
	enum ice_lut_size lut_size;
	struct ice_aq_desc desc;
	u8 *lut = params->lut;


	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* A get may use a larger caller buffer; a set must match exactly */
	lut_size = ice_lut_type_to_size(lut_type);
	if (lut_size > params->lut_size)
		return -EINVAL;
	else if (set && lut_size != params->lut_size)
		return -EINVAL;

	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	if (set)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	desc_params = &desc.params.get_set_rss_lut;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	if (lut_type == ICE_LUT_GLOBAL)
		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
					  params->global_lut_id);

	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
	desc_params->flags = cpu_to_le16(flags);

	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}
3999 4000 /** 4001 * ice_aq_set_rss_lut 4002 * @hw: pointer to the hardware structure 4003 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4004 * 4005 * set the RSS lookup table, PF or VSI type 4006 */ 4007 int 4008 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4009 { 4010 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4011 } 4012 4013 /** 4014 * __ice_aq_get_set_rss_key 4015 * @hw: pointer to the HW struct 4016 * @vsi_id: VSI FW index 4017 * @key: pointer to key info struct 4018 * @set: set true to set the key, false to get the key 4019 * 4020 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4021 */ 4022 static int 4023 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4024 struct ice_aqc_get_set_rss_keys *key, bool set) 4025 { 4026 struct ice_aqc_get_set_rss_key *desc_params; 4027 u16 key_size = sizeof(*key); 4028 struct ice_aq_desc desc; 4029 4030 if (set) { 4031 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4032 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4033 } else { 4034 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4035 } 4036 4037 desc_params = &desc.params.get_set_rss_key; 4038 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4039 4040 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4041 } 4042 4043 /** 4044 * ice_aq_get_rss_key 4045 * @hw: pointer to the HW struct 4046 * @vsi_handle: software VSI handle 4047 * @key: pointer to key info struct 4048 * 4049 * get the RSS key per VSI 4050 */ 4051 int 4052 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4053 struct ice_aqc_get_set_rss_keys *key) 4054 { 4055 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4056 return -EINVAL; 4057 4058 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4059 key, false); 4060 } 4061 4062 /** 4063 * ice_aq_set_rss_key 4064 * @hw: pointer to the HW struct 4065 * @vsi_handle: software VSI handle 4066 * @keys: 
 pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	/* Translate the software VSI handle to the FW VSI index */
	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
4101 */ 4102 static int 4103 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4104 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4105 struct ice_sq_cd *cd) 4106 { 4107 struct ice_aqc_add_tx_qgrp *list; 4108 struct ice_aqc_add_txqs *cmd; 4109 struct ice_aq_desc desc; 4110 u16 i, sum_size = 0; 4111 4112 cmd = &desc.params.add_txqs; 4113 4114 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4115 4116 if (!qg_list) 4117 return -EINVAL; 4118 4119 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4120 return -EINVAL; 4121 4122 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4123 sum_size += struct_size(list, txqs, list->num_txqs); 4124 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4125 list->num_txqs); 4126 } 4127 4128 if (buf_size != sum_size) 4129 return -EINVAL; 4130 4131 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4132 4133 cmd->num_qgrps = num_qgrps; 4134 4135 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4136 } 4137 4138 /** 4139 * ice_aq_dis_lan_txq 4140 * @hw: pointer to the hardware structure 4141 * @num_qgrps: number of groups in the list 4142 * @qg_list: the list of groups to disable 4143 * @buf_size: the total size of the qg_list buffer in bytes 4144 * @rst_src: if called due to reset, specifies the reset source 4145 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4146 * @cd: pointer to command details structure or NULL 4147 * 4148 * Disable LAN Tx queue (0x0C31) 4149 */ 4150 static int 4151 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4152 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4153 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4154 struct ice_sq_cd *cd) 4155 { 4156 struct ice_aqc_dis_txq_item *item; 4157 struct ice_aqc_dis_txqs *cmd; 4158 struct ice_aq_desc desc; 4159 u16 vmvf_and_timeout; 4160 u16 i, sz = 0; 4161 int status; 4162 4163 cmd = &desc.params.dis_txqs; 4164 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4165 4166 /* qg_list can be NULL only in VM/VF reset flow */ 
4167 if (!qg_list && !rst_src) 4168 return -EINVAL; 4169 4170 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4171 return -EINVAL; 4172 4173 cmd->num_entries = num_qgrps; 4174 4175 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); 4176 4177 switch (rst_src) { 4178 case ICE_VM_RESET: 4179 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4180 vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; 4181 break; 4182 case ICE_VF_RESET: 4183 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4184 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4185 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & 4186 ICE_AQC_Q_DIS_VMVF_NUM_M; 4187 break; 4188 case ICE_NO_RESET: 4189 default: 4190 break; 4191 } 4192 4193 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); 4194 4195 /* flush pipe on time out */ 4196 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4197 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4198 if (!qg_list) 4199 goto do_aq; 4200 4201 /* set RD bit to indicate that command buffer is provided by the driver 4202 * and it needs to be read by the firmware 4203 */ 4204 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4205 4206 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4207 u16 item_size = struct_size(item, q_id, item->num_qs); 4208 4209 /* If the num of queues is even, add 2 bytes of padding */ 4210 if ((item->num_qs % 2) == 0) 4211 item_size += 2; 4212 4213 sz += item_size; 4214 4215 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4216 } 4217 4218 if (buf_size != sz) 4219 return -EINVAL; 4220 4221 do_aq: 4222 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4223 if (status) { 4224 if (!qg_list) 4225 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4226 vmvf_num, hw->adminq.sq_last_status); 4227 else 4228 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4229 le16_to_cpu(qg_list[0].q_id[0]), 4230 hw->adminq.sq_last_status); 4231 } 4232 return status; 4233 } 4234 4235 /** 4236 * 
ice_aq_cfg_lan_txq 4237 * @hw: pointer to the hardware structure 4238 * @buf: buffer for command 4239 * @buf_size: size of buffer in bytes 4240 * @num_qs: number of queues being configured 4241 * @oldport: origination lport 4242 * @newport: destination lport 4243 * @cd: pointer to command details structure or NULL 4244 * 4245 * Move/Configure LAN Tx queue (0x0C32) 4246 * 4247 * There is a better AQ command to use for moving nodes, so only coding 4248 * this one for configuring the node. 4249 */ 4250 int 4251 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4252 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4253 struct ice_sq_cd *cd) 4254 { 4255 struct ice_aqc_cfg_txqs *cmd; 4256 struct ice_aq_desc desc; 4257 int status; 4258 4259 cmd = &desc.params.cfg_txqs; 4260 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4261 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4262 4263 if (!buf) 4264 return -EINVAL; 4265 4266 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4267 cmd->num_qs = num_qs; 4268 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4269 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); 4270 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); 4271 cmd->blocked_cgds = 0; 4272 4273 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4274 if (status) 4275 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4276 hw->adminq.sq_last_status); 4277 return status; 4278 } 4279 4280 /** 4281 * ice_aq_add_rdma_qsets 4282 * @hw: pointer to the hardware structure 4283 * @num_qset_grps: Number of RDMA Qset groups 4284 * @qset_list: list of Qset groups to be added 4285 * @buf_size: size of buffer for indirect command 4286 * @cd: pointer to command details structure or NULL 4287 * 4288 * Add Tx RDMA Qsets (0x0C33) 4289 */ 4290 static int 4291 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4292 struct ice_aqc_add_rdma_qset_data *qset_list, 4293 u16 buf_size, struct ice_sq_cd *cd) 4294 { 4295 
struct ice_aqc_add_rdma_qset_data *list; 4296 struct ice_aqc_add_rdma_qset *cmd; 4297 struct ice_aq_desc desc; 4298 u16 i, sum_size = 0; 4299 4300 cmd = &desc.params.add_rdma_qset; 4301 4302 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4303 4304 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4305 return -EINVAL; 4306 4307 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4308 u16 num_qsets = le16_to_cpu(list->num_qsets); 4309 4310 sum_size += struct_size(list, rdma_qsets, num_qsets); 4311 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4312 num_qsets); 4313 } 4314 4315 if (buf_size != sum_size) 4316 return -EINVAL; 4317 4318 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4319 4320 cmd->num_qset_grps = num_qset_grps; 4321 4322 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4323 } 4324 4325 /* End of FW Admin Queue command wrappers */ 4326 4327 /** 4328 * ice_write_byte - write a byte to a packed context structure 4329 * @src_ctx: the context structure to read from 4330 * @dest_ctx: the context to be written to 4331 * @ce_info: a description of the struct to be filled 4332 */ 4333 static void 4334 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4335 { 4336 u8 src_byte, dest_byte, mask; 4337 u8 *from, *dest; 4338 u16 shift_width; 4339 4340 /* copy from the next struct field */ 4341 from = src_ctx + ce_info->offset; 4342 4343 /* prepare the bits and mask */ 4344 shift_width = ce_info->lsb % 8; 4345 mask = (u8)(BIT(ce_info->width) - 1); 4346 4347 src_byte = *from; 4348 src_byte &= mask; 4349 4350 /* shift to correct alignment */ 4351 mask <<= shift_width; 4352 src_byte <<= shift_width; 4353 4354 /* get the current bits from the target bit string */ 4355 dest = dest_ctx + (ce_info->lsb / 8); 4356 4357 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4358 4359 dest_byte &= ~mask; /* get the bits not changing */ 4360 dest_byte |= src_byte; /* add in the new bits */ 4361 4362 /* put it all back */ 4363 
memcpy(dest, &dest_byte, sizeof(dest_byte)); 4364 } 4365 4366 /** 4367 * ice_write_word - write a word to a packed context structure 4368 * @src_ctx: the context structure to read from 4369 * @dest_ctx: the context to be written to 4370 * @ce_info: a description of the struct to be filled 4371 */ 4372 static void 4373 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4374 { 4375 u16 src_word, mask; 4376 __le16 dest_word; 4377 u8 *from, *dest; 4378 u16 shift_width; 4379 4380 /* copy from the next struct field */ 4381 from = src_ctx + ce_info->offset; 4382 4383 /* prepare the bits and mask */ 4384 shift_width = ce_info->lsb % 8; 4385 mask = BIT(ce_info->width) - 1; 4386 4387 /* don't swizzle the bits until after the mask because the mask bits 4388 * will be in a different bit position on big endian machines 4389 */ 4390 src_word = *(u16 *)from; 4391 src_word &= mask; 4392 4393 /* shift to correct alignment */ 4394 mask <<= shift_width; 4395 src_word <<= shift_width; 4396 4397 /* get the current bits from the target bit string */ 4398 dest = dest_ctx + (ce_info->lsb / 8); 4399 4400 memcpy(&dest_word, dest, sizeof(dest_word)); 4401 4402 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4403 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4404 4405 /* put it all back */ 4406 memcpy(dest, &dest_word, sizeof(dest_word)); 4407 } 4408 4409 /** 4410 * ice_write_dword - write a dword to a packed context structure 4411 * @src_ctx: the context structure to read from 4412 * @dest_ctx: the context to be written to 4413 * @ce_info: a description of the struct to be filled 4414 */ 4415 static void 4416 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4417 { 4418 u32 src_dword, mask; 4419 __le32 dest_dword; 4420 u8 *from, *dest; 4421 u16 shift_width; 4422 4423 /* copy from the next struct field */ 4424 from = src_ctx + ce_info->offset; 4425 4426 /* prepare the bits and mask */ 4427 shift_width = 
ce_info->lsb % 8; 4428 4429 /* if the field width is exactly 32 on an x86 machine, then the shift 4430 * operation will not work because the SHL instructions count is masked 4431 * to 5 bits so the shift will do nothing 4432 */ 4433 if (ce_info->width < 32) 4434 mask = BIT(ce_info->width) - 1; 4435 else 4436 mask = (u32)~0; 4437 4438 /* don't swizzle the bits until after the mask because the mask bits 4439 * will be in a different bit position on big endian machines 4440 */ 4441 src_dword = *(u32 *)from; 4442 src_dword &= mask; 4443 4444 /* shift to correct alignment */ 4445 mask <<= shift_width; 4446 src_dword <<= shift_width; 4447 4448 /* get the current bits from the target bit string */ 4449 dest = dest_ctx + (ce_info->lsb / 8); 4450 4451 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4452 4453 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4454 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4455 4456 /* put it all back */ 4457 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4458 } 4459 4460 /** 4461 * ice_write_qword - write a qword to a packed context structure 4462 * @src_ctx: the context structure to read from 4463 * @dest_ctx: the context to be written to 4464 * @ce_info: a description of the struct to be filled 4465 */ 4466 static void 4467 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4468 { 4469 u64 src_qword, mask; 4470 __le64 dest_qword; 4471 u8 *from, *dest; 4472 u16 shift_width; 4473 4474 /* copy from the next struct field */ 4475 from = src_ctx + ce_info->offset; 4476 4477 /* prepare the bits and mask */ 4478 shift_width = ce_info->lsb % 8; 4479 4480 /* if the field width is exactly 64 on an x86 machine, then the shift 4481 * operation will not work because the SHL instructions count is masked 4482 * to 6 bits so the shift will do nothing 4483 */ 4484 if (ce_info->width < 64) 4485 mask = BIT_ULL(ce_info->width) - 1; 4486 else 4487 mask = (u64)~0; 4488 4489 /* don't swizzle the 
bits until after the mask because the mask bits 4490 * will be in a different bit position on big endian machines 4491 */ 4492 src_qword = *(u64 *)from; 4493 src_qword &= mask; 4494 4495 /* shift to correct alignment */ 4496 mask <<= shift_width; 4497 src_qword <<= shift_width; 4498 4499 /* get the current bits from the target bit string */ 4500 dest = dest_ctx + (ce_info->lsb / 8); 4501 4502 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4503 4504 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4505 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4506 4507 /* put it all back */ 4508 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4509 } 4510 4511 /** 4512 * ice_set_ctx - set context bits in packed structure 4513 * @hw: pointer to the hardware structure 4514 * @src_ctx: pointer to a generic non-packed context structure 4515 * @dest_ctx: pointer to memory for the packed structure 4516 * @ce_info: a description of the structure to be transformed 4517 */ 4518 int 4519 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4520 const struct ice_ctx_ele *ce_info) 4521 { 4522 int f; 4523 4524 for (f = 0; ce_info[f].width; f++) { 4525 /* We have to deal with each element of the FW response 4526 * using the correct size so that we are correct regardless 4527 * of the endianness of the machine. 4528 */ 4529 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4530 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", 4531 f, ce_info[f].width, ce_info[f].size_of); 4532 continue; 4533 } 4534 switch (ce_info[f].size_of) { 4535 case sizeof(u8): 4536 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4537 break; 4538 case sizeof(u16): 4539 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4540 break; 4541 case sizeof(u32): 4542 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4543 break; 4544 case sizeof(u64): 4545 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4546 break; 4547 default: 4548 return -EINVAL; 4549 } 4550 } 4551 4552 return 0; 4553 } 4554 4555 /** 4556 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4557 * @hw: pointer to the HW struct 4558 * @vsi_handle: software VSI handle 4559 * @tc: TC number 4560 * @q_handle: software queue handle 4561 */ 4562 struct ice_q_ctx * 4563 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4564 { 4565 struct ice_vsi_ctx *vsi; 4566 struct ice_q_ctx *q_ctx; 4567 4568 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4569 if (!vsi) 4570 return NULL; 4571 if (q_handle >= vsi->num_lan_q_entries[tc]) 4572 return NULL; 4573 if (!vsi->lan_q_ctx[tc]) 4574 return NULL; 4575 q_ctx = vsi->lan_q_ctx[tc]; 4576 return &q_ctx[q_handle]; 4577 } 4578 4579 /** 4580 * ice_ena_vsi_txq 4581 * @pi: port information structure 4582 * @vsi_handle: software VSI handle 4583 * @tc: TC number 4584 * @q_handle: software queue handle 4585 * @num_qgrps: Number of added queue groups 4586 * @buf: list of queue groups to be added 4587 * @buf_size: size of buffer for indirect command 4588 * @cd: pointer to command details structure or NULL 4589 * 4590 * This function adds one LAN queue 4591 */ 4592 int 4593 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4594 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4595 struct ice_sq_cd *cd) 4596 { 4597 struct ice_aqc_txsched_elem_data node = { 0 }; 4598 struct ice_sched_node *parent; 4599 struct ice_q_ctx *q_ctx; 4600 struct ice_hw 
*hw; 4601 int status; 4602 4603 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4604 return -EIO; 4605 4606 if (num_qgrps > 1 || buf->num_txqs > 1) 4607 return -ENOSPC; 4608 4609 hw = pi->hw; 4610 4611 if (!ice_is_vsi_valid(hw, vsi_handle)) 4612 return -EINVAL; 4613 4614 mutex_lock(&pi->sched_lock); 4615 4616 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4617 if (!q_ctx) { 4618 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4619 q_handle); 4620 status = -EINVAL; 4621 goto ena_txq_exit; 4622 } 4623 4624 /* find a parent node */ 4625 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4626 ICE_SCHED_NODE_OWNER_LAN); 4627 if (!parent) { 4628 status = -EINVAL; 4629 goto ena_txq_exit; 4630 } 4631 4632 buf->parent_teid = parent->info.node_teid; 4633 node.parent_teid = parent->info.node_teid; 4634 /* Mark that the values in the "generic" section as valid. The default 4635 * value in the "generic" section is zero. This means that : 4636 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4637 * - 0 priority among siblings, indicated by Bit 1-3. 4638 * - WFQ, indicated by Bit 4. 4639 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4640 * Bit 5-6. 4641 * - Bit 7 is reserved. 4642 * Without setting the generic section as valid in valid_sections, the 4643 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
4644 */ 4645 buf->txqs[0].info.valid_sections = 4646 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4647 ICE_AQC_ELEM_VALID_EIR; 4648 buf->txqs[0].info.generic = 0; 4649 buf->txqs[0].info.cir_bw.bw_profile_idx = 4650 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4651 buf->txqs[0].info.cir_bw.bw_alloc = 4652 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4653 buf->txqs[0].info.eir_bw.bw_profile_idx = 4654 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4655 buf->txqs[0].info.eir_bw.bw_alloc = 4656 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4657 4658 /* add the LAN queue */ 4659 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4660 if (status) { 4661 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4662 le16_to_cpu(buf->txqs[0].txq_id), 4663 hw->adminq.sq_last_status); 4664 goto ena_txq_exit; 4665 } 4666 4667 node.node_teid = buf->txqs[0].q_teid; 4668 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4669 q_ctx->q_handle = q_handle; 4670 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4671 4672 /* add a leaf node into scheduler tree queue layer */ 4673 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4674 if (!status) 4675 status = ice_sched_replay_q_bw(pi, q_ctx); 4676 4677 ena_txq_exit: 4678 mutex_unlock(&pi->sched_lock); 4679 return status; 4680 } 4681 4682 /** 4683 * ice_dis_vsi_txq 4684 * @pi: port information structure 4685 * @vsi_handle: software VSI handle 4686 * @tc: TC number 4687 * @num_queues: number of queues 4688 * @q_handles: pointer to software queue handle array 4689 * @q_ids: pointer to the q_id array 4690 * @q_teids: pointer to queue node teids 4691 * @rst_src: if called due to reset, specifies the reset source 4692 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4693 * @cd: pointer to command details structure or NULL 4694 * 4695 * This function removes queues and their corresponding nodes in SW DB 4696 */ 4697 int 4698 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4699 u16 
*q_handles, u16 *q_ids, u32 *q_teids, 4700 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4701 struct ice_sq_cd *cd) 4702 { 4703 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4704 u16 i, buf_size = __struct_size(qg_list); 4705 struct ice_q_ctx *q_ctx; 4706 int status = -ENOENT; 4707 struct ice_hw *hw; 4708 4709 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4710 return -EIO; 4711 4712 hw = pi->hw; 4713 4714 if (!num_queues) { 4715 /* if queue is disabled already yet the disable queue command 4716 * has to be sent to complete the VF reset, then call 4717 * ice_aq_dis_lan_txq without any queue information 4718 */ 4719 if (rst_src) 4720 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4721 vmvf_num, NULL); 4722 return -EIO; 4723 } 4724 4725 mutex_lock(&pi->sched_lock); 4726 4727 for (i = 0; i < num_queues; i++) { 4728 struct ice_sched_node *node; 4729 4730 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4731 if (!node) 4732 continue; 4733 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4734 if (!q_ctx) { 4735 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4736 q_handles[i]); 4737 continue; 4738 } 4739 if (q_ctx->q_handle != q_handles[i]) { 4740 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4741 q_ctx->q_handle, q_handles[i]); 4742 continue; 4743 } 4744 qg_list->parent_teid = node->info.parent_teid; 4745 qg_list->num_qs = 1; 4746 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4747 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4748 vmvf_num, cd); 4749 4750 if (status) 4751 break; 4752 ice_free_sched_node(pi, node); 4753 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4754 q_ctx->q_teid = ICE_INVAL_TEID; 4755 } 4756 mutex_unlock(&pi->sched_lock); 4757 return status; 4758 } 4759 4760 /** 4761 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4762 * @pi: port information structure 4763 * @vsi_handle: software VSI handle 4764 * @tc_bitmap: TC bitmap 4765 * @maxqs: max queues array per TC 4766 * @owner: 
LAN or RDMA 4767 * 4768 * This function adds/updates the VSI queues per TC. 4769 */ 4770 static int 4771 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4772 u16 *maxqs, u8 owner) 4773 { 4774 int status = 0; 4775 u8 i; 4776 4777 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4778 return -EIO; 4779 4780 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4781 return -EINVAL; 4782 4783 mutex_lock(&pi->sched_lock); 4784 4785 ice_for_each_traffic_class(i) { 4786 /* configuration is possible only if TC node is present */ 4787 if (!ice_sched_get_tc_node(pi, i)) 4788 continue; 4789 4790 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4791 ice_is_tc_ena(tc_bitmap, i)); 4792 if (status) 4793 break; 4794 } 4795 4796 mutex_unlock(&pi->sched_lock); 4797 return status; 4798 } 4799 4800 /** 4801 * ice_cfg_vsi_lan - configure VSI LAN queues 4802 * @pi: port information structure 4803 * @vsi_handle: software VSI handle 4804 * @tc_bitmap: TC bitmap 4805 * @max_lanqs: max LAN queues array per TC 4806 * 4807 * This function adds/updates the VSI LAN queues per TC. 4808 */ 4809 int 4810 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4811 u16 *max_lanqs) 4812 { 4813 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4814 ICE_SCHED_NODE_OWNER_LAN); 4815 } 4816 4817 /** 4818 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4819 * @pi: port information structure 4820 * @vsi_handle: software VSI handle 4821 * @tc_bitmap: TC bitmap 4822 * @max_rdmaqs: max RDMA queues array per TC 4823 * 4824 * This function adds/updates the VSI RDMA queues per TC. 
4825 */ 4826 int 4827 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4828 u16 *max_rdmaqs) 4829 { 4830 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4831 ICE_SCHED_NODE_OWNER_RDMA); 4832 } 4833 4834 /** 4835 * ice_ena_vsi_rdma_qset 4836 * @pi: port information structure 4837 * @vsi_handle: software VSI handle 4838 * @tc: TC number 4839 * @rdma_qset: pointer to RDMA Qset 4840 * @num_qsets: number of RDMA Qsets 4841 * @qset_teid: pointer to Qset node TEIDs 4842 * 4843 * This function adds RDMA Qset 4844 */ 4845 int 4846 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4847 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4848 { 4849 struct ice_aqc_txsched_elem_data node = { 0 }; 4850 struct ice_aqc_add_rdma_qset_data *buf; 4851 struct ice_sched_node *parent; 4852 struct ice_hw *hw; 4853 u16 i, buf_size; 4854 int ret; 4855 4856 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4857 return -EIO; 4858 hw = pi->hw; 4859 4860 if (!ice_is_vsi_valid(hw, vsi_handle)) 4861 return -EINVAL; 4862 4863 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4864 buf = kzalloc(buf_size, GFP_KERNEL); 4865 if (!buf) 4866 return -ENOMEM; 4867 mutex_lock(&pi->sched_lock); 4868 4869 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4870 ICE_SCHED_NODE_OWNER_RDMA); 4871 if (!parent) { 4872 ret = -EINVAL; 4873 goto rdma_error_exit; 4874 } 4875 buf->parent_teid = parent->info.node_teid; 4876 node.parent_teid = parent->info.node_teid; 4877 4878 buf->num_qsets = cpu_to_le16(num_qsets); 4879 for (i = 0; i < num_qsets; i++) { 4880 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4881 buf->rdma_qsets[i].info.valid_sections = 4882 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4883 ICE_AQC_ELEM_VALID_EIR; 4884 buf->rdma_qsets[i].info.generic = 0; 4885 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4886 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4887 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4888 
cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4889 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4890 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4891 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4892 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4893 } 4894 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4895 if (ret) { 4896 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4897 goto rdma_error_exit; 4898 } 4899 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4900 for (i = 0; i < num_qsets; i++) { 4901 node.node_teid = buf->rdma_qsets[i].qset_teid; 4902 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4903 &node, NULL); 4904 if (ret) 4905 break; 4906 qset_teid[i] = le32_to_cpu(node.node_teid); 4907 } 4908 rdma_error_exit: 4909 mutex_unlock(&pi->sched_lock); 4910 kfree(buf); 4911 return ret; 4912 } 4913 4914 /** 4915 * ice_dis_vsi_rdma_qset - free RDMA resources 4916 * @pi: port_info struct 4917 * @count: number of RDMA Qsets to free 4918 * @qset_teid: TEID of Qset node 4919 * @q_id: list of queue IDs being disabled 4920 */ 4921 int 4922 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4923 u16 *q_id) 4924 { 4925 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4926 u16 qg_size = __struct_size(qg_list); 4927 struct ice_hw *hw; 4928 int status = 0; 4929 int i; 4930 4931 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4932 return -EIO; 4933 4934 hw = pi->hw; 4935 4936 mutex_lock(&pi->sched_lock); 4937 4938 for (i = 0; i < count; i++) { 4939 struct ice_sched_node *node; 4940 4941 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4942 if (!node) 4943 continue; 4944 4945 qg_list->parent_teid = node->info.parent_teid; 4946 qg_list->num_qs = 1; 4947 qg_list->q_id[0] = 4948 cpu_to_le16(q_id[i] | 4949 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4950 4951 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4952 ICE_NO_RESET, 0, NULL); 4953 if (status) 4954 break; 4955 4956 ice_free_sched_node(pi, node); 4957 } 4958 4959 
mutex_unlock(&pi->sched_lock); 4960 return status; 4961 } 4962 4963 /** 4964 * ice_aq_get_cgu_abilities - get cgu abilities 4965 * @hw: pointer to the HW struct 4966 * @abilities: CGU abilities 4967 * 4968 * Get CGU abilities (0x0C61) 4969 * Return: 0 on success or negative value on failure. 4970 */ 4971 int 4972 ice_aq_get_cgu_abilities(struct ice_hw *hw, 4973 struct ice_aqc_get_cgu_abilities *abilities) 4974 { 4975 struct ice_aq_desc desc; 4976 4977 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 4978 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 4979 } 4980 4981 /** 4982 * ice_aq_set_input_pin_cfg - set input pin config 4983 * @hw: pointer to the HW struct 4984 * @input_idx: Input index 4985 * @flags1: Input flags 4986 * @flags2: Input flags 4987 * @freq: Frequency in Hz 4988 * @phase_delay: Delay in ps 4989 * 4990 * Set CGU input config (0x0C62) 4991 * Return: 0 on success or negative value on failure. 4992 */ 4993 int 4994 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 4995 u32 freq, s32 phase_delay) 4996 { 4997 struct ice_aqc_set_cgu_input_config *cmd; 4998 struct ice_aq_desc desc; 4999 5000 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 5001 cmd = &desc.params.set_cgu_input_config; 5002 cmd->input_idx = input_idx; 5003 cmd->flags1 = flags1; 5004 cmd->flags2 = flags2; 5005 cmd->freq = cpu_to_le32(freq); 5006 cmd->phase_delay = cpu_to_le32(phase_delay); 5007 5008 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5009 } 5010 5011 /** 5012 * ice_aq_get_input_pin_cfg - get input pin config 5013 * @hw: pointer to the HW struct 5014 * @input_idx: Input index 5015 * @status: Pin status 5016 * @type: Pin type 5017 * @flags1: Input flags 5018 * @flags2: Input flags 5019 * @freq: Frequency in Hz 5020 * @phase_delay: Delay in ps 5021 * 5022 * Get CGU input config (0x0C63) 5023 * Return: 0 on success or negative value on failure. 
5024 */ 5025 int 5026 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5027 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5028 { 5029 struct ice_aqc_get_cgu_input_config *cmd; 5030 struct ice_aq_desc desc; 5031 int ret; 5032 5033 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5034 cmd = &desc.params.get_cgu_input_config; 5035 cmd->input_idx = input_idx; 5036 5037 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5038 if (!ret) { 5039 if (status) 5040 *status = cmd->status; 5041 if (type) 5042 *type = cmd->type; 5043 if (flags1) 5044 *flags1 = cmd->flags1; 5045 if (flags2) 5046 *flags2 = cmd->flags2; 5047 if (freq) 5048 *freq = le32_to_cpu(cmd->freq); 5049 if (phase_delay) 5050 *phase_delay = le32_to_cpu(cmd->phase_delay); 5051 } 5052 5053 return ret; 5054 } 5055 5056 /** 5057 * ice_aq_set_output_pin_cfg - set output pin config 5058 * @hw: pointer to the HW struct 5059 * @output_idx: Output index 5060 * @flags: Output flags 5061 * @src_sel: Index of DPLL block 5062 * @freq: Output frequency 5063 * @phase_delay: Output phase compensation 5064 * 5065 * Set CGU output config (0x0C64) 5066 * Return: 0 on success or negative value on failure. 
5067 */ 5068 int 5069 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5070 u8 src_sel, u32 freq, s32 phase_delay) 5071 { 5072 struct ice_aqc_set_cgu_output_config *cmd; 5073 struct ice_aq_desc desc; 5074 5075 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5076 cmd = &desc.params.set_cgu_output_config; 5077 cmd->output_idx = output_idx; 5078 cmd->flags = flags; 5079 cmd->src_sel = src_sel; 5080 cmd->freq = cpu_to_le32(freq); 5081 cmd->phase_delay = cpu_to_le32(phase_delay); 5082 5083 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5084 } 5085 5086 /** 5087 * ice_aq_get_output_pin_cfg - get output pin config 5088 * @hw: pointer to the HW struct 5089 * @output_idx: Output index 5090 * @flags: Output flags 5091 * @src_sel: Internal DPLL source 5092 * @freq: Output frequency 5093 * @src_freq: Source frequency 5094 * 5095 * Get CGU output config (0x0C65) 5096 * Return: 0 on success or negative value on failure. 5097 */ 5098 int 5099 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5100 u8 *src_sel, u32 *freq, u32 *src_freq) 5101 { 5102 struct ice_aqc_get_cgu_output_config *cmd; 5103 struct ice_aq_desc desc; 5104 int ret; 5105 5106 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5107 cmd = &desc.params.get_cgu_output_config; 5108 cmd->output_idx = output_idx; 5109 5110 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5111 if (!ret) { 5112 if (flags) 5113 *flags = cmd->flags; 5114 if (src_sel) 5115 *src_sel = cmd->src_sel; 5116 if (freq) 5117 *freq = le32_to_cpu(cmd->freq); 5118 if (src_freq) 5119 *src_freq = le32_to_cpu(cmd->src_freq); 5120 } 5121 5122 return ret; 5123 } 5124 5125 /** 5126 * ice_aq_get_cgu_dpll_status - get dpll status 5127 * @hw: pointer to the HW struct 5128 * @dpll_num: DPLL index 5129 * @ref_state: Reference clock state 5130 * @config: current DPLL config 5131 * @dpll_state: current DPLL state 5132 * @phase_offset: Phase offset in ns 5133 * @eec_mode: 
EEC_mode 5134 * 5135 * Get CGU DPLL status (0x0C66) 5136 * Return: 0 on success or negative value on failure. 5137 */ 5138 int 5139 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5140 u8 *dpll_state, u8 *config, s64 *phase_offset, 5141 u8 *eec_mode) 5142 { 5143 struct ice_aqc_get_cgu_dpll_status *cmd; 5144 struct ice_aq_desc desc; 5145 int status; 5146 5147 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5148 cmd = &desc.params.get_cgu_dpll_status; 5149 cmd->dpll_num = dpll_num; 5150 5151 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5152 if (!status) { 5153 *ref_state = cmd->ref_state; 5154 *dpll_state = cmd->dpll_state; 5155 *config = cmd->config; 5156 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5157 *phase_offset <<= 32; 5158 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5159 *phase_offset = sign_extend64(*phase_offset, 47); 5160 *eec_mode = cmd->eec_mode; 5161 } 5162 5163 return status; 5164 } 5165 5166 /** 5167 * ice_aq_set_cgu_dpll_config - set dpll config 5168 * @hw: pointer to the HW struct 5169 * @dpll_num: DPLL index 5170 * @ref_state: Reference clock state 5171 * @config: DPLL config 5172 * @eec_mode: EEC mode 5173 * 5174 * Set CGU DPLL config (0x0C67) 5175 * Return: 0 on success or negative value on failure. 
5176 */ 5177 int 5178 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5179 u8 config, u8 eec_mode) 5180 { 5181 struct ice_aqc_set_cgu_dpll_config *cmd; 5182 struct ice_aq_desc desc; 5183 5184 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5185 cmd = &desc.params.set_cgu_dpll_config; 5186 cmd->dpll_num = dpll_num; 5187 cmd->ref_state = ref_state; 5188 cmd->config = config; 5189 cmd->eec_mode = eec_mode; 5190 5191 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5192 } 5193 5194 /** 5195 * ice_aq_set_cgu_ref_prio - set input reference priority 5196 * @hw: pointer to the HW struct 5197 * @dpll_num: DPLL index 5198 * @ref_idx: Reference pin index 5199 * @ref_priority: Reference input priority 5200 * 5201 * Set CGU reference priority (0x0C68) 5202 * Return: 0 on success or negative value on failure. 5203 */ 5204 int 5205 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5206 u8 ref_priority) 5207 { 5208 struct ice_aqc_set_cgu_ref_prio *cmd; 5209 struct ice_aq_desc desc; 5210 5211 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5212 cmd = &desc.params.set_cgu_ref_prio; 5213 cmd->dpll_num = dpll_num; 5214 cmd->ref_idx = ref_idx; 5215 cmd->ref_priority = ref_priority; 5216 5217 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5218 } 5219 5220 /** 5221 * ice_aq_get_cgu_ref_prio - get input reference priority 5222 * @hw: pointer to the HW struct 5223 * @dpll_num: DPLL index 5224 * @ref_idx: Reference pin index 5225 * @ref_prio: Reference input priority 5226 * 5227 * Get CGU reference priority (0x0C69) 5228 * Return: 0 on success or negative value on failure. 
5229 */ 5230 int 5231 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5232 u8 *ref_prio) 5233 { 5234 struct ice_aqc_get_cgu_ref_prio *cmd; 5235 struct ice_aq_desc desc; 5236 int status; 5237 5238 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); 5239 cmd = &desc.params.get_cgu_ref_prio; 5240 cmd->dpll_num = dpll_num; 5241 cmd->ref_idx = ref_idx; 5242 5243 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5244 if (!status) 5245 *ref_prio = cmd->ref_priority; 5246 5247 return status; 5248 } 5249 5250 /** 5251 * ice_aq_get_cgu_info - get cgu info 5252 * @hw: pointer to the HW struct 5253 * @cgu_id: CGU ID 5254 * @cgu_cfg_ver: CGU config version 5255 * @cgu_fw_ver: CGU firmware version 5256 * 5257 * Get CGU info (0x0C6A) 5258 * Return: 0 on success or negative value on failure. 5259 */ 5260 int 5261 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, 5262 u32 *cgu_fw_ver) 5263 { 5264 struct ice_aqc_get_cgu_info *cmd; 5265 struct ice_aq_desc desc; 5266 int status; 5267 5268 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); 5269 cmd = &desc.params.get_cgu_info; 5270 5271 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5272 if (!status) { 5273 *cgu_id = le32_to_cpu(cmd->cgu_id); 5274 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); 5275 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); 5276 } 5277 5278 return status; 5279 } 5280 5281 /** 5282 * ice_aq_set_phy_rec_clk_out - set RCLK phy out 5283 * @hw: pointer to the HW struct 5284 * @phy_output: PHY reference clock output pin 5285 * @enable: GPIO state to be applied 5286 * @freq: PHY output frequency 5287 * 5288 * Set phy recovered clock as reference (0x0630) 5289 * Return: 0 on success or negative value on failure. 
5290 */ 5291 int 5292 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, 5293 u32 *freq) 5294 { 5295 struct ice_aqc_set_phy_rec_clk_out *cmd; 5296 struct ice_aq_desc desc; 5297 int status; 5298 5299 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); 5300 cmd = &desc.params.set_phy_rec_clk_out; 5301 cmd->phy_output = phy_output; 5302 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; 5303 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; 5304 cmd->freq = cpu_to_le32(*freq); 5305 5306 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5307 if (!status) 5308 *freq = le32_to_cpu(cmd->freq); 5309 5310 return status; 5311 } 5312 5313 /** 5314 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info 5315 * @hw: pointer to the HW struct 5316 * @phy_output: PHY reference clock output pin 5317 * @port_num: Port number 5318 * @flags: PHY flags 5319 * @node_handle: PHY output frequency 5320 * 5321 * Get PHY recovered clock output info (0x0631) 5322 * Return: 0 on success or negative value on failure. 
5323 */ 5324 int 5325 ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, 5326 u8 *flags, u16 *node_handle) 5327 { 5328 struct ice_aqc_get_phy_rec_clk_out *cmd; 5329 struct ice_aq_desc desc; 5330 int status; 5331 5332 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); 5333 cmd = &desc.params.get_phy_rec_clk_out; 5334 cmd->phy_output = *phy_output; 5335 5336 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5337 if (!status) { 5338 *phy_output = cmd->phy_output; 5339 if (port_num) 5340 *port_num = cmd->port_num; 5341 if (flags) 5342 *flags = cmd->flags; 5343 if (node_handle) 5344 *node_handle = le16_to_cpu(cmd->node_handle); 5345 } 5346 5347 return status; 5348 } 5349 5350 /** 5351 * ice_aq_get_sensor_reading 5352 * @hw: pointer to the HW struct 5353 * @data: pointer to data to be read from the sensor 5354 * 5355 * Get sensor reading (0x0632) 5356 */ 5357 int ice_aq_get_sensor_reading(struct ice_hw *hw, 5358 struct ice_aqc_get_sensor_reading_resp *data) 5359 { 5360 struct ice_aqc_get_sensor_reading *cmd; 5361 struct ice_aq_desc desc; 5362 int status; 5363 5364 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); 5365 cmd = &desc.params.get_sensor_reading; 5366 #define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 5367 #define ICE_INTERNAL_TEMP_SENSOR 0 5368 cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; 5369 cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; 5370 5371 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5372 if (!status) 5373 memcpy(data, &desc.params.get_sensor_reading_resp, 5374 sizeof(*data)); 5375 5376 return status; 5377 } 5378 5379 /** 5380 * ice_replay_pre_init - replay pre initialization 5381 * @hw: pointer to the HW struct 5382 * 5383 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	/* always succeeds; int return kept for call-site symmetry */
	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		/* aggregator config is replayed only if filters succeeded */
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	/* only the low 40 bits of the register hold the counter */
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 *
 * Return: 0 on success or a negative error code on failure. A failure is
 * also logged when FW returns a count other than the one element requested.
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	/* clear caller's buffer; FW fills it on success */
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); 5576 cmd = &desc.params.read_write_i2c; 5577 5578 if (!data) 5579 return -EINVAL; 5580 5581 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5582 5583 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5584 cmd->topo_addr = topo_addr; 5585 cmd->i2c_params = params; 5586 cmd->i2c_addr = addr; 5587 5588 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5589 if (!status) { 5590 struct ice_aqc_read_i2c_resp *resp; 5591 u8 i; 5592 5593 resp = &desc.params.read_i2c_resp; 5594 for (i = 0; i < data_size; i++) { 5595 *data = resp->i2c_data[i]; 5596 data++; 5597 } 5598 } 5599 5600 return status; 5601 } 5602 5603 /** 5604 * ice_aq_write_i2c 5605 * @hw: pointer to the hw struct 5606 * @topo_addr: topology address for a device to communicate with 5607 * @bus_addr: 7-bit I2C bus address 5608 * @addr: I2C memory address (I2C offset) with up to 16 bits 5609 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes) 5610 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device 5611 * @cd: pointer to command details structure or NULL 5612 * 5613 * Write I2C (0x06E3) 5614 * 5615 * * Return: 5616 * * 0 - Successful write to the i2c device 5617 * * -EINVAL - Data size greater than 4 bytes 5618 * * -EIO - FW error 5619 */ 5620 int 5621 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5622 u16 bus_addr, __le16 addr, u8 params, const u8 *data, 5623 struct ice_sq_cd *cd) 5624 { 5625 struct ice_aq_desc desc = { 0 }; 5626 struct ice_aqc_i2c *cmd; 5627 u8 data_size; 5628 5629 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c); 5630 cmd = &desc.params.read_write_i2c; 5631 5632 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5633 5634 /* data_size limited to 4 */ 5635 if (data_size > 4) 5636 return -EINVAL; 5637 5638 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5639 cmd->topo_addr = topo_addr; 5640 cmd->i2c_params = params; 5641 
cmd->i2c_addr = addr; 5642 5643 memcpy(cmd->i2c_data, data, data_size); 5644 5645 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5646 } 5647 5648 /** 5649 * ice_aq_set_gpio 5650 * @hw: pointer to the hw struct 5651 * @gpio_ctrl_handle: GPIO controller node handle 5652 * @pin_idx: IO Number of the GPIO that needs to be set 5653 * @value: SW provide IO value to set in the LSB 5654 * @cd: pointer to command details structure or NULL 5655 * 5656 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology 5657 */ 5658 int 5659 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, 5660 struct ice_sq_cd *cd) 5661 { 5662 struct ice_aqc_gpio *cmd; 5663 struct ice_aq_desc desc; 5664 5665 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); 5666 cmd = &desc.params.read_write_gpio; 5667 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5668 cmd->gpio_num = pin_idx; 5669 cmd->gpio_val = value ? 1 : 0; 5670 5671 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5672 } 5673 5674 /** 5675 * ice_aq_get_gpio 5676 * @hw: pointer to the hw struct 5677 * @gpio_ctrl_handle: GPIO controller node handle 5678 * @pin_idx: IO Number of the GPIO that needs to be set 5679 * @value: IO value read 5680 * @cd: pointer to command details structure or NULL 5681 * 5682 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of 5683 * the topology 5684 */ 5685 int 5686 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, 5687 bool *value, struct ice_sq_cd *cd) 5688 { 5689 struct ice_aqc_gpio *cmd; 5690 struct ice_aq_desc desc; 5691 int status; 5692 5693 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); 5694 cmd = &desc.params.read_write_gpio; 5695 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5696 cmd->gpio_num = pin_idx; 5697 5698 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5699 if (status) 5700 return status; 5701 5702 *value = !!cmd->gpio_val; 5703 return 0; 5704 } 5705 5706 /** 
5707 * ice_is_fw_api_min_ver 5708 * @hw: pointer to the hardware structure 5709 * @maj: major version 5710 * @min: minor version 5711 * @patch: patch version 5712 * 5713 * Checks if the firmware API is minimum version 5714 */ 5715 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) 5716 { 5717 if (hw->api_maj_ver == maj) { 5718 if (hw->api_min_ver > min) 5719 return true; 5720 if (hw->api_min_ver == min && hw->api_patch >= patch) 5721 return true; 5722 } else if (hw->api_maj_ver > maj) { 5723 return true; 5724 } 5725 5726 return false; 5727 } 5728 5729 /** 5730 * ice_fw_supports_link_override 5731 * @hw: pointer to the hardware structure 5732 * 5733 * Checks if the firmware supports link override 5734 */ 5735 bool ice_fw_supports_link_override(struct ice_hw *hw) 5736 { 5737 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, 5738 ICE_FW_API_LINK_OVERRIDE_MIN, 5739 ICE_FW_API_LINK_OVERRIDE_PATCH); 5740 } 5741 5742 /** 5743 * ice_get_link_default_override 5744 * @ldo: pointer to the link default override struct 5745 * @pi: pointer to the port info struct 5746 * 5747 * Gets the link default override for a port 5748 */ 5749 int 5750 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 5751 struct ice_port_info *pi) 5752 { 5753 u16 i, tlv, tlv_len, tlv_start, buf, offset; 5754 struct ice_hw *hw = pi->hw; 5755 int status; 5756 5757 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 5758 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 5759 if (status) { 5760 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); 5761 return status; 5762 } 5763 5764 /* Each port has its own config; calculate for our port */ 5765 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 5766 ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 5767 5768 /* link options first */ 5769 status = ice_read_sr_word(hw, tlv_start, &buf); 5770 if (status) { 5771 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5772 return status; 
5773 } 5774 ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf); 5775 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 5776 ICE_LINK_OVERRIDE_PHY_CFG_S; 5777 5778 /* link PHY config */ 5779 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 5780 status = ice_read_sr_word(hw, offset, &buf); 5781 if (status) { 5782 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); 5783 return status; 5784 } 5785 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 5786 5787 /* PHY types low */ 5788 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; 5789 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 5790 status = ice_read_sr_word(hw, (offset + i), &buf); 5791 if (status) { 5792 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5793 return status; 5794 } 5795 /* shift 16 bits at a time to fill 64 bits */ 5796 ldo->phy_type_low |= ((u64)buf << (i * 16)); 5797 } 5798 5799 /* PHY types high */ 5800 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + 5801 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; 5802 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 5803 status = ice_read_sr_word(hw, (offset + i), &buf); 5804 if (status) { 5805 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5806 return status; 5807 } 5808 /* shift 16 bits at a time to fill 64 bits */ 5809 ldo->phy_type_high |= ((u64)buf << (i * 16)); 5810 } 5811 5812 return status; 5813 } 5814 5815 /** 5816 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled 5817 * @caps: get PHY capability data 5818 */ 5819 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) 5820 { 5821 if (caps->caps & ICE_AQC_PHY_AN_MODE || 5822 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | 5823 ICE_AQC_PHY_AN_EN_CLAUSE73 | 5824 ICE_AQC_PHY_AN_EN_CLAUSE37)) 5825 return true; 5826 5827 return false; 5828 } 5829 5830 /** 5831 * ice_aq_set_lldp_mib - Set the LLDP MIB 5832 * @hw: pointer to the HW struct 
5833 * @mib_type: Local, Remote or both Local and Remote MIBs 5834 * @buf: pointer to the caller-supplied buffer to store the MIB block 5835 * @buf_size: size of the buffer (in bytes) 5836 * @cd: pointer to command details structure or NULL 5837 * 5838 * Set the LLDP MIB. (0x0A08) 5839 */ 5840 int 5841 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 5842 struct ice_sq_cd *cd) 5843 { 5844 struct ice_aqc_lldp_set_local_mib *cmd; 5845 struct ice_aq_desc desc; 5846 5847 cmd = &desc.params.lldp_set_mib; 5848 5849 if (buf_size == 0 || !buf) 5850 return -EINVAL; 5851 5852 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); 5853 5854 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); 5855 desc.datalen = cpu_to_le16(buf_size); 5856 5857 cmd->type = mib_type; 5858 cmd->length = cpu_to_le16(buf_size); 5859 5860 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 5861 } 5862 5863 /** 5864 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl 5865 * @hw: pointer to HW struct 5866 */ 5867 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) 5868 { 5869 if (hw->mac_type != ICE_MAC_E810) 5870 return false; 5871 5872 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ, 5873 ICE_FW_API_LLDP_FLTR_MIN, 5874 ICE_FW_API_LLDP_FLTR_PATCH); 5875 } 5876 5877 /** 5878 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter 5879 * @hw: pointer to HW struct 5880 * @vsi_num: absolute HW index for VSI 5881 * @add: boolean for if adding or removing a filter 5882 */ 5883 int 5884 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) 5885 { 5886 struct ice_aqc_lldp_filter_ctrl *cmd; 5887 struct ice_aq_desc desc; 5888 5889 cmd = &desc.params.lldp_filter_ctrl; 5890 5891 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); 5892 5893 if (add) 5894 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; 5895 else 5896 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; 5897 5898 cmd->vsi_num = 
cpu_to_le16(vsi_num); 5899 5900 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5901 } 5902 5903 /** 5904 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request 5905 * @hw: pointer to HW struct 5906 */ 5907 int ice_lldp_execute_pending_mib(struct ice_hw *hw) 5908 { 5909 struct ice_aq_desc desc; 5910 5911 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib); 5912 5913 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5914 } 5915 5916 /** 5917 * ice_fw_supports_report_dflt_cfg 5918 * @hw: pointer to the hardware structure 5919 * 5920 * Checks if the firmware supports report default configuration 5921 */ 5922 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) 5923 { 5924 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ, 5925 ICE_FW_API_REPORT_DFLT_CFG_MIN, 5926 ICE_FW_API_REPORT_DFLT_CFG_PATCH); 5927 } 5928 5929 /* each of the indexes into the following array match the speed of a return 5930 * value from the list of AQ returned speeds like the range: 5931 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding 5932 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this 5933 * array. The array is defined as 15 elements long because the link_speed 5934 * returned by the firmware is a 16 bit * value, but is indexed 5935 * by [fls(speed) - 1] 5936 */ 5937 static const u32 ice_aq_to_link_speed[] = { 5938 SPEED_10, /* BIT(0) */ 5939 SPEED_100, 5940 SPEED_1000, 5941 SPEED_2500, 5942 SPEED_5000, 5943 SPEED_10000, 5944 SPEED_20000, 5945 SPEED_25000, 5946 SPEED_40000, 5947 SPEED_50000, 5948 SPEED_100000, /* BIT(10) */ 5949 SPEED_200000, 5950 }; 5951 5952 /** 5953 * ice_get_link_speed - get integer speed from table 5954 * @index: array index from fls(aq speed) - 1 5955 * 5956 * Returns: u32 value containing integer speed 5957 */ 5958 u32 ice_get_link_speed(u16 index) 5959 { 5960 if (index >= ARRAY_SIZE(ice_aq_to_link_speed)) 5961 return 0; 5962 5963 return ice_aq_to_link_speed[index]; 5964 } 5965