// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
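/* Usage sketch (illustrative only, not part of the driver): a phy_type_low
 * value with bit 12 set decodes to "10GBASE_T" via the tables above, so
 *
 *	ice_dump_phy_type(hw, BIT_ULL(12), 0, "example");
 *
 * would emit, with ICE_DBG_PHY enabled:
 *
 *	example: phy_type_low: 0x0000000000001000
 *	example: bit(12): 10GBASE_T
 *	example: phy_type_high: 0x0000000000000000
 */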
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
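/* Usage sketch (mirrors what ice_init_hw() does below; illustrative only):
 * a port can report up to two addresses, so size the response buffer for two
 * manage_mac_read entries:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *				 sizeof(struct ice_aqc_manage_mac_read_resp),
 *				 GFP_KERNEL);
 *
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 */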
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
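/* Usage sketch (illustrative only; ICE_AQC_PHY_AN_MODE is the autoneg
 * capability bit from ice_adminq_cmd.h): query the currently active PHY
 * configuration and test a capability bit:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = {};
 *	int status;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     &pcaps, NULL);
 *	if (!status && (pcaps.caps & ICE_AQC_PHY_AN_MODE))
 *		autoneg_enabled = true;
 */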
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Find and return the node handle for a given node type and part number in the
 * netlist. Returns 0 when the node is found, -ENOTBLK otherwise. If
 * node_handle is provided, it is set to the found node handle.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	struct ice_aqc_get_link_topo cmd;
	u8 rec_node_part_number;
	u16 rec_node_handle;
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		int status;

		memset(&cmd, 0, sizeof(cmd));

		cmd.addr.topo_params.node_type_ctx =
			(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 &rec_node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number) {
			if (node_handle)
				*node_handle = rec_node_handle;
			return 0;
		}
	}

	return -ENOTBLK;
}
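/* Usage sketch (illustrative only; ICE_EXAMPLE_PHY_PART_NUMBER is a
 * hypothetical part number and use_phy_node() a placeholder): scan the
 * netlist for a PHY node and fetch its handle:
 *
 *	u16 handle;
 *	int status;
 *
 *	status = ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
 *				       ICE_EXAMPLE_PHY_PART_NUMBER, &handle);
 *	if (!status)
 *		use_phy_node(handle);
 */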
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
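/* Usage sketch (illustrative only): refresh the port's link status and test
 * for link up with the ICE_AQ_LINK_UP flag from ice_adminq_cmd.h:
 *
 *	status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *	if (!status && (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		link_is_up = true;
 */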
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
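/* Usage sketch (mirrors the call in ice_init_hw() below; illustrative only):
 * enable jumbo frames by programming the maximum supported frame size at the
 * MAC:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */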
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	kfree(config);

	return status;
}
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PF's. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
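/* Usage sketch (illustrative only; ICE_AQC_FW_LOG_ID_GENERAL and
 * ICE_FW_LOG_EVNT_INFO stand in for a module ID and an event mask, whose
 * exact names depend on the headers in use): per the comment above, a caller
 * enables FW logging over the Rx CQ for selected modules before device init:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_GENERAL].cfg = ICE_FW_LOG_EVNT_INFO;
 *	status = ice_cfg_fw_log(hw, true);
 */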
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
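/* Usage sketch (illustrative only): ice_init_hw() pairs with ice_deinit_hw()
 * below; a probe/remove flow uses them as:
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;
 *
 *	// ... device operation ...
 *
 *	ice_deinit_hw(hw);
 */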
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
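/* Usage sketch (mirrors the call in ice_init_hw() above; illustrative only):
 * request a PF reset via ice_reset() below and let the polling helpers wait
 * for completion:
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 */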
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
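/* Usage sketch (illustrative only; ring_dma/ring_count/rx_buf_len are
 * placeholders, and ICE_RLAN_BASE_S/ICE_RLAN_CTX_DBUF_S are assumed to be
 * the shift constants from ice_lan_tx_rx.h): fill a sparse Rx queue context
 * and commit it to HW:
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> ICE_RLAN_BASE_S;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */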
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
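/* Usage sketch (illustrative only; dest/addr are placeholders, and the
 * lower_16_bits()/upper_16_bits() helpers are assumed to be available): read
 * a remote register over the sideband queue. A zero opcode selects a read
 * (see the in->opcode test above), and the result is returned in in.data:
 *
 *	struct ice_sbq_msg_input in = {};
 *
 *	in.dest_dev = dest;
 *	in.opcode = 0;
 *	in.msg_addr_low = lower_16_bits(addr);
 *	in.msg_addr_high = upper_16_bits(addr);
 *	status = ice_sbq_rw_reg(hw, &in);
 *	if (!status)
 *		val = in.data;
 */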
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
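/* Usage sketch (illustrative only): report a driver version to FW; the
 * string must be printable ASCII and NUL-terminated:
 *
 *	struct ice_driver_ver dv = {};
 *
 *	dv.major_ver = 1;
 *	dv.minor_ver = 0;
 *	strscpy(dv.driver_string, "example", sizeof(dv.driver_string));
 *	status = ice_aq_send_driver_ver(hw, &dv, NULL);
 */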
1864 */ 1865 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1866 { 1867 struct ice_aqc_q_shutdown *cmd; 1868 struct ice_aq_desc desc; 1869 1870 cmd = &desc.params.q_shutdown; 1871 1872 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1873 1874 if (unloading) 1875 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1876 1877 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1878 } 1879 1880 /** 1881 * ice_aq_req_res 1882 * @hw: pointer to the HW struct 1883 * @res: resource ID 1884 * @access: access type 1885 * @sdp_number: resource number 1886 * @timeout: the maximum time in ms that the driver may hold the resource 1887 * @cd: pointer to command details structure or NULL 1888 * 1889 * Requests common resource using the admin queue commands (0x0008). 1890 * When attempting to acquire the Global Config Lock, the driver can 1891 * learn of three states: 1892 * 1) 0 - acquired lock, and can perform download package 1893 * 2) -EIO - did not get lock, driver should fail to load 1894 * 3) -EALREADY - did not get lock, but another driver has 1895 * successfully downloaded the package; the driver does 1896 * not have to download the package and can continue 1897 * loading 1898 * 1899 * Note that if the caller is in an acquire lock, perform action, release lock 1900 * phase of operation, it is possible that the FW may detect a timeout and issue 1901 * a CORER. In this case, the driver will receive a CORER interrupt and will 1902 * have to determine its cause. The calling thread that is handling this flow 1903 * will likely get an error propagated back to it indicating the Download 1904 * Package, Update Package or the Release Resource AQ commands timed out. 1905 */ 1906 static int 1907 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1908 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1909 struct ice_sq_cd *cd) 1910 { 1911 struct ice_aqc_req_res *cmd_resp; 1912 struct ice_aq_desc desc; 1913 int status; 1914 1915 cmd_resp = &desc.params.res_owner; 1916 1917 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1918 1919 cmd_resp->res_id = cpu_to_le16(res); 1920 cmd_resp->access_type = cpu_to_le16(access); 1921 cmd_resp->res_number = cpu_to_le32(sdp_number); 1922 cmd_resp->timeout = cpu_to_le32(*timeout); 1923 *timeout = 0; 1924 1925 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1926 1927 /* The completion specifies the maximum time in ms that the driver 1928 * may hold the resource in the Timeout field. 1929 */ 1930 1931 /* Global config lock response utilizes an additional status field. 1932 * 1933 * If the Global config lock resource is held by some other driver, the 1934 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1935 * and the timeout field indicates the maximum time the current owner 1936 * of the resource has to free it. 
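	 *
	 * To summarize the handling below: GLBL_SUCCESS maps to 0 (lock
	 * acquired), GLBL_IN_PROG maps to -EIO with *timeout set (retry once
	 * the owner's window expires), and GLBL_DONE maps to -EALREADY
	 * (another function already downloaded the package).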
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009).
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ?
timeout - delay : 0; 2028 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2029 2030 if (status == -EALREADY) 2031 /* lock free, but no work to do */ 2032 break; 2033 2034 if (!status) 2035 /* lock acquired */ 2036 break; 2037 } 2038 if (status && status != -EALREADY) 2039 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 2040 2041 ice_acquire_res_exit: 2042 if (status == -EALREADY) { 2043 if (access == ICE_RES_WRITE) 2044 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 2045 else 2046 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 2047 } 2048 return status; 2049 } 2050 2051 /** 2052 * ice_release_res 2053 * @hw: pointer to the HW structure 2054 * @res: resource ID 2055 * 2056 * This function will release a resource using the proper Admin Command. 2057 */ 2058 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2059 { 2060 unsigned long timeout; 2061 int status; 2062 2063 /* there are some rare cases when trying to release the resource 2064 * results in an admin queue timeout, so handle them correctly 2065 */ 2066 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 2067 do { 2068 status = ice_aq_release_res(hw, res, 0, NULL); 2069 if (status != -EIO) 2070 break; 2071 usleep_range(1000, 2000); 2072 } while (time_before(jiffies, timeout)); 2073 } 2074 2075 /** 2076 * ice_aq_alloc_free_res - command to allocate/free resources 2077 * @hw: pointer to the HW struct 2078 * @buf: Indirect buffer to hold data parameters and response 2079 * @buf_size: size of buffer for indirect commands 2080 * @opc: pass in the command opcode 2081 * 2082 * Helper function to allocate/free resources using the admin queue commands 2083 */ 2084 int ice_aq_alloc_free_res(struct ice_hw *hw, 2085 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2086 enum ice_adminq_opc opc) 2087 { 2088 struct ice_aqc_alloc_free_res_cmd *cmd; 2089 struct ice_aq_desc desc; 2090 2091 cmd = &desc.params.sw_res_ctrl; 2092 2093 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 2094 return -EINVAL; 2095 2096 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2097 2098 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2099 2100 cmd->num_entries = cpu_to_le16(1); 2101 2102 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 2103 } 2104 2105 /** 2106 * ice_alloc_hw_res - allocate resource 2107 * @hw: pointer to the HW struct 2108 * @type: type of resource 2109 * @num: number of resources to allocate 2110 * @btm: allocate from bottom 2111 * @res: pointer to array that will receive the resources 2112 */ 2113 int 2114 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2115 { 2116 struct ice_aqc_alloc_free_res_elem *buf; 2117 u16 buf_len; 2118 int status; 2119 2120 buf_len = struct_size(buf, elem, num); 2121 buf = kzalloc(buf_len, GFP_KERNEL); 2122 if (!buf) 2123 return -ENOMEM; 2124 2125 /* Prepare buffer to allocate resource. 
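	 * The element count, the resource type and the allocation flags are
	 * all carried in the same indirect buffer that later carries the
	 * response.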
*/ 2126 buf->num_elems = cpu_to_le16(num); 2127 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2128 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2129 if (btm) 2130 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2131 2132 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2133 if (status) 2134 goto ice_alloc_res_exit; 2135 2136 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2137 2138 ice_alloc_res_exit: 2139 kfree(buf); 2140 return status; 2141 } 2142 2143 /** 2144 * ice_free_hw_res - free allocated HW resource 2145 * @hw: pointer to the HW struct 2146 * @type: type of resource to free 2147 * @num: number of resources 2148 * @res: pointer to array that contains the resources to free 2149 */ 2150 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2151 { 2152 struct ice_aqc_alloc_free_res_elem *buf; 2153 u16 buf_len; 2154 int status; 2155 2156 buf_len = struct_size(buf, elem, num); 2157 buf = kzalloc(buf_len, GFP_KERNEL); 2158 if (!buf) 2159 return -ENOMEM; 2160 2161 /* Prepare buffer to free resource. */ 2162 buf->num_elems = cpu_to_le16(num); 2163 buf->res_type = cpu_to_le16(type); 2164 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2165 2166 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2167 if (status) 2168 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2169 2170 kfree(buf); 2171 return status; 2172 } 2173 2174 /** 2175 * ice_get_num_per_func - determine number of resources per PF 2176 * @hw: pointer to the HW structure 2177 * @max: value to be evenly split between each PF 2178 * 2179 * Determine the number of valid functions by going through the bitmap returned 2180 * from parsing capabilities and use this to calculate the number of resources 2181 * per PF based on the max value passed in. 2182 */ 2183 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2184 { 2185 u8 funcs; 2186 2187 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2188 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2189 ICE_CAPS_VALID_FUNCS_M); 2190 2191 if (!funcs) 2192 return 0; 2193 2194 return max / funcs; 2195 } 2196 2197 /** 2198 * ice_parse_common_caps - parse common device/function capabilities 2199 * @hw: pointer to the HW struct 2200 * @caps: pointer to common capabilities structure 2201 * @elem: the capability element to parse 2202 * @prefix: message prefix for tracing capabilities 2203 * 2204 * Given a capability element, extract relevant details into the common 2205 * capability structure. 2206 * 2207 * Returns: true if the capability matches one of the common capability ids, 2208 * false otherwise. 
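 *
 * Example (illustrative only): the parse loops later in this file call
 *
 *	found = ice_parse_common_caps(hw, &func_p->common_cap,
 *				      &cap_resp[i], "func caps");
 *
 * and only fall back to their own switch when it returns false.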
2209 */ 2210 static bool 2211 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2212 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2213 { 2214 u32 logical_id = le32_to_cpu(elem->logical_id); 2215 u32 phys_id = le32_to_cpu(elem->phys_id); 2216 u32 number = le32_to_cpu(elem->number); 2217 u16 cap = le16_to_cpu(elem->cap); 2218 bool found = true; 2219 2220 switch (cap) { 2221 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2222 caps->valid_functions = number; 2223 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2224 caps->valid_functions); 2225 break; 2226 case ICE_AQC_CAPS_SRIOV: 2227 caps->sr_iov_1_1 = (number == 1); 2228 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2229 caps->sr_iov_1_1); 2230 break; 2231 case ICE_AQC_CAPS_DCB: 2232 caps->dcb = (number == 1); 2233 caps->active_tc_bitmap = logical_id; 2234 caps->maxtc = phys_id; 2235 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2236 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2237 caps->active_tc_bitmap); 2238 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2239 break; 2240 case ICE_AQC_CAPS_RSS: 2241 caps->rss_table_size = number; 2242 caps->rss_table_entry_width = logical_id; 2243 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2244 caps->rss_table_size); 2245 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2246 caps->rss_table_entry_width); 2247 break; 2248 case ICE_AQC_CAPS_RXQS: 2249 caps->num_rxq = number; 2250 caps->rxq_first_id = phys_id; 2251 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2252 caps->num_rxq); 2253 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2254 caps->rxq_first_id); 2255 break; 2256 case ICE_AQC_CAPS_TXQS: 2257 caps->num_txq = number; 2258 caps->txq_first_id = phys_id; 2259 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2260 caps->num_txq); 2261 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2262 caps->txq_first_id); 2263 break; 2264 case ICE_AQC_CAPS_MSIX: 2265 caps->num_msix_vectors = number; 2266 caps->msix_vector_first_id = phys_id; 2267 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2268 caps->num_msix_vectors); 2269 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2270 caps->msix_vector_first_id); 2271 break; 2272 case ICE_AQC_CAPS_PENDING_NVM_VER: 2273 caps->nvm_update_pending_nvm = true; 2274 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2275 break; 2276 case ICE_AQC_CAPS_PENDING_OROM_VER: 2277 caps->nvm_update_pending_orom = true; 2278 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2279 break; 2280 case ICE_AQC_CAPS_PENDING_NET_VER: 2281 caps->nvm_update_pending_netlist = true; 2282 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2283 break; 2284 case ICE_AQC_CAPS_NVM_MGMT: 2285 caps->nvm_unified_update = 2286 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2287 true : false; 2288 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2289 caps->nvm_unified_update); 2290 break; 2291 case ICE_AQC_CAPS_RDMA: 2292 caps->rdma = (number == 1); 2293 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2294 break; 2295 case ICE_AQC_CAPS_MAX_MTU: 2296 caps->max_mtu = number; 2297 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2298 prefix, caps->max_mtu); 2299 break; 2300 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2301 caps->pcie_reset_avoidance = (number > 0); 2302 ice_debug(hw, ICE_DBG_INIT, 2303 "%s: pcie_reset_avoidance = %d\n", prefix, 2304 caps->pcie_reset_avoidance); 2305 break; 2306 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2307 caps->reset_restrict_support = (number == 1); 2308 ice_debug(hw, ICE_DBG_INIT, 2309 "%s: reset_restrict_support = %d\n", prefix, 2310 caps->reset_restrict_support); 2311 break; 2312 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2313 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2314 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2315 prefix, caps->roce_lag); 2316 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2317 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2318 prefix, caps->sriov_lag); 2319 break; 2320 default: 2321 /* Not one of the recognized common capabilities */ 2322 found = false; 2323 } 2324 2325 return found; 2326 } 2327 2328 /** 2329 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2330 * @hw: pointer to the HW structure 2331 * @caps: pointer to capabilities structure to fix 2332 * 2333 * Re-calculate the capabilities that are dependent on the number of physical 2334 * ports; i.e. some features are not supported or function differently on 2335 * devices with more than 4 ports. 2336 */ 2337 static void 2338 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2339 { 2340 /* This assumes device capabilities are always scanned before function 2341 * capabilities during the initialization flow. 2342 */ 2343 if (hw->dev_caps.num_funcs > 4) { 2344 /* Max 4 TCs per port */ 2345 caps->maxtc = 4; 2346 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2347 caps->maxtc); 2348 if (caps->rdma) { 2349 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2350 caps->rdma = 0; 2351 } 2352 2353 /* print message only when processing device capabilities 2354 * during initialization. 2355 */ 2356 if (caps == &hw->dev_caps.common_cap) 2357 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2358 } 2359 } 2360 2361 /** 2362 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2363 * @hw: pointer to the HW struct 2364 * @func_p: pointer to function capabilities structure 2365 * @cap: pointer to the capability element to parse 2366 * 2367 * Extract function capabilities for ICE_AQC_CAPS_VF. 
2368 */ 2369 static void 2370 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2371 struct ice_aqc_list_caps_elem *cap) 2372 { 2373 u32 logical_id = le32_to_cpu(cap->logical_id); 2374 u32 number = le32_to_cpu(cap->number); 2375 2376 func_p->num_allocd_vfs = number; 2377 func_p->vf_base_id = logical_id; 2378 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2379 func_p->num_allocd_vfs); 2380 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2381 func_p->vf_base_id); 2382 } 2383 2384 /** 2385 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2386 * @hw: pointer to the HW struct 2387 * @func_p: pointer to function capabilities structure 2388 * @cap: pointer to the capability element to parse 2389 * 2390 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2391 */ 2392 static void 2393 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2394 struct ice_aqc_list_caps_elem *cap) 2395 { 2396 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2397 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2398 le32_to_cpu(cap->number)); 2399 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2400 func_p->guar_num_vsi); 2401 } 2402 2403 /** 2404 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2405 * @hw: pointer to the HW struct 2406 * @func_p: pointer to function capabilities structure 2407 * @cap: pointer to the capability element to parse 2408 * 2409 * Extract function capabilities for ICE_AQC_CAPS_1588. 2410 */ 2411 static void 2412 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2413 struct ice_aqc_list_caps_elem *cap) 2414 { 2415 struct ice_ts_func_info *info = &func_p->ts_func_info; 2416 u32 number = le32_to_cpu(cap->number); 2417 2418 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2419 func_p->common_cap.ieee_1588 = info->ena; 2420 2421 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2422 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2423 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2424 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2425 2426 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2427 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2428 2429 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2430 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2431 } else { 2432 /* Unknown clock frequency, so assume a (probably incorrect) 2433 * default to avoid out-of-bounds look ups of frequency 2434 * related information. 
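		 * ICE_TIME_REF_FREQ_25_000 (25.000 MHz) is used as that
		 * fallback below.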
		 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar = ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
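 *
 * The capabilities structure is zeroed up front, so anything firmware does
 * not report stays at its zero/disabled default.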
2497 */ 2498 static void 2499 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2500 void *buf, u32 cap_count) 2501 { 2502 struct ice_aqc_list_caps_elem *cap_resp; 2503 u32 i; 2504 2505 cap_resp = buf; 2506 2507 memset(func_p, 0, sizeof(*func_p)); 2508 2509 for (i = 0; i < cap_count; i++) { 2510 u16 cap = le16_to_cpu(cap_resp[i].cap); 2511 bool found; 2512 2513 found = ice_parse_common_caps(hw, &func_p->common_cap, 2514 &cap_resp[i], "func caps"); 2515 2516 switch (cap) { 2517 case ICE_AQC_CAPS_VF: 2518 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2519 break; 2520 case ICE_AQC_CAPS_VSI: 2521 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2522 break; 2523 case ICE_AQC_CAPS_1588: 2524 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2525 break; 2526 case ICE_AQC_CAPS_FD: 2527 ice_parse_fdir_func_caps(hw, func_p); 2528 break; 2529 default: 2530 /* Don't list common capabilities as unknown */ 2531 if (!found) 2532 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2533 i, cap); 2534 break; 2535 } 2536 } 2537 2538 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2539 } 2540 2541 /** 2542 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2543 * @hw: pointer to the HW struct 2544 * @dev_p: pointer to device capabilities structure 2545 * @cap: capability element to parse 2546 * 2547 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2548 */ 2549 static void 2550 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2551 struct ice_aqc_list_caps_elem *cap) 2552 { 2553 u32 number = le32_to_cpu(cap->number); 2554 2555 dev_p->num_funcs = hweight32(number); 2556 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2557 dev_p->num_funcs); 2558 } 2559 2560 /** 2561 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2562 * @hw: pointer to the HW struct 2563 * @dev_p: pointer to device capabilities structure 2564 * @cap: capability element to parse 2565 * 2566 * Parse ICE_AQC_CAPS_VF for device capabilities. 2567 */ 2568 static void 2569 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2570 struct ice_aqc_list_caps_elem *cap) 2571 { 2572 u32 number = le32_to_cpu(cap->number); 2573 2574 dev_p->num_vfs_exposed = number; 2575 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2576 dev_p->num_vfs_exposed); 2577 } 2578 2579 /** 2580 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2581 * @hw: pointer to the HW struct 2582 * @dev_p: pointer to device capabilities structure 2583 * @cap: capability element to parse 2584 * 2585 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2586 */ 2587 static void 2588 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2589 struct ice_aqc_list_caps_elem *cap) 2590 { 2591 u32 number = le32_to_cpu(cap->number); 2592 2593 dev_p->num_vsi_allocd_to_host = number; 2594 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2595 dev_p->num_vsi_allocd_to_host); 2596 } 2597 2598 /** 2599 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2600 * @hw: pointer to the HW struct 2601 * @dev_p: pointer to device capabilities structure 2602 * @cap: capability element to parse 2603 * 2604 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
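 *
 * The capability element packs the timer ownership and enable flags into
 * cap->number, the enabled-port bitmap into cap->logical_id, and the timer
 * ownership map into cap->phys_id; each is unpacked in turn below.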
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
		  info->ts_ll_read);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2685 */ 2686 static void 2687 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2688 void *buf, u32 cap_count) 2689 { 2690 struct ice_aqc_list_caps_elem *cap_resp; 2691 u32 i; 2692 2693 cap_resp = buf; 2694 2695 memset(dev_p, 0, sizeof(*dev_p)); 2696 2697 for (i = 0; i < cap_count; i++) { 2698 u16 cap = le16_to_cpu(cap_resp[i].cap); 2699 bool found; 2700 2701 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2702 &cap_resp[i], "dev caps"); 2703 2704 switch (cap) { 2705 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2706 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2707 break; 2708 case ICE_AQC_CAPS_VF: 2709 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2710 break; 2711 case ICE_AQC_CAPS_VSI: 2712 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2713 break; 2714 case ICE_AQC_CAPS_1588: 2715 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2716 break; 2717 case ICE_AQC_CAPS_FD: 2718 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2719 break; 2720 default: 2721 /* Don't list common capabilities as unknown */ 2722 if (!found) 2723 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2724 i, cap); 2725 break; 2726 } 2727 } 2728 2729 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2730 } 2731 2732 /** 2733 * ice_is_pf_c827 - check if pf contains c827 phy 2734 * @hw: pointer to the hw struct 2735 */ 2736 bool ice_is_pf_c827(struct ice_hw *hw) 2737 { 2738 struct ice_aqc_get_link_topo cmd = {}; 2739 u8 node_part_number; 2740 u16 node_handle; 2741 int status; 2742 2743 if (hw->mac_type != ICE_MAC_E810) 2744 return false; 2745 2746 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2747 return true; 2748 2749 cmd.addr.topo_params.node_type_ctx = 2750 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2751 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2752 cmd.addr.topo_params.index = 0; 2753 2754 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2755 &node_handle); 2756 2757 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2758 return false; 2759 2760 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2761 return true; 2762 2763 return false; 2764 } 2765 2766 /** 2767 * ice_is_phy_rclk_in_netlist 2768 * @hw: pointer to the hw struct 2769 * 2770 * Check if the PHY Recovered Clock device is present in the netlist 2771 */ 2772 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2773 { 2774 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2775 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2776 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2777 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2778 return false; 2779 2780 return true; 2781 } 2782 2783 /** 2784 * ice_is_clock_mux_in_netlist 2785 * @hw: pointer to the hw struct 2786 * 2787 * Check if the Clock Multiplexer device is present in the netlist 2788 */ 2789 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2790 { 2791 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2792 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2793 NULL)) 2794 return false; 2795 2796 return true; 2797 } 2798 2799 /** 2800 * ice_is_cgu_in_netlist - check for CGU presence 2801 * @hw: pointer to the hw struct 2802 * 2803 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2804 * Save the CGU part number in the hw structure for later use. 
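 * Only the ZL30632_80032 and SI5383_5384 parts are recognized as CGUs.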
2805 * Return: 2806 * * true - cgu is present 2807 * * false - cgu is not present 2808 */ 2809 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2810 { 2811 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2812 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2813 NULL)) { 2814 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2815 return true; 2816 } else if (!ice_find_netlist_node(hw, 2817 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2818 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2819 NULL)) { 2820 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2821 return true; 2822 } 2823 2824 return false; 2825 } 2826 2827 /** 2828 * ice_is_gps_in_netlist 2829 * @hw: pointer to the hw struct 2830 * 2831 * Check if the GPS generic device is present in the netlist 2832 */ 2833 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2834 { 2835 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2836 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2837 return false; 2838 2839 return true; 2840 } 2841 2842 /** 2843 * ice_aq_list_caps - query function/device capabilities 2844 * @hw: pointer to the HW struct 2845 * @buf: a buffer to hold the capabilities 2846 * @buf_size: size of the buffer 2847 * @cap_count: if not NULL, set to the number of capabilities reported 2848 * @opc: capabilities type to discover, device or function 2849 * @cd: pointer to command details structure or NULL 2850 * 2851 * Get the function (0x000A) or device (0x000B) capabilities description from 2852 * firmware and store it in the buffer. 2853 * 2854 * If the cap_count pointer is not NULL, then it is set to the number of 2855 * capabilities firmware will report. Note that if the buffer size is too 2856 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2857 * cap_count will still be updated in this case. It is recommended that the 2858 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2859 * firmware could return) to avoid this. 2860 */ 2861 int 2862 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2863 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2864 { 2865 struct ice_aqc_list_caps *cmd; 2866 struct ice_aq_desc desc; 2867 int status; 2868 2869 cmd = &desc.params.get_cap; 2870 2871 if (opc != ice_aqc_opc_list_func_caps && 2872 opc != ice_aqc_opc_list_dev_caps) 2873 return -EINVAL; 2874 2875 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2876 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2877 2878 if (cap_count) 2879 *cap_count = le32_to_cpu(cmd->count); 2880 2881 return status; 2882 } 2883 2884 /** 2885 * ice_discover_dev_caps - Read and extract device capabilities 2886 * @hw: pointer to the hardware structure 2887 * @dev_caps: pointer to device capabilities structure 2888 * 2889 * Read the device capabilities and extract them into the dev_caps structure 2890 * for later use. 2891 */ 2892 int 2893 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2894 { 2895 u32 cap_count = 0; 2896 void *cbuf; 2897 int status; 2898 2899 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2900 if (!cbuf) 2901 return -ENOMEM; 2902 2903 /* Although the driver doesn't know the number of capabilities the 2904 * device will return, we can simply send a 4KB buffer, the maximum 2905 * possible size that firmware can return. 
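	 * cap_count is primed below with the worst case, ICE_AQ_MAX_BUF_LEN
	 * divided by the size of one capability element, and firmware then
	 * reports the actual count back through ice_aq_list_caps().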
2906 */ 2907 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2908 2909 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2910 ice_aqc_opc_list_dev_caps, NULL); 2911 if (!status) 2912 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2913 kfree(cbuf); 2914 2915 return status; 2916 } 2917 2918 /** 2919 * ice_discover_func_caps - Read and extract function capabilities 2920 * @hw: pointer to the hardware structure 2921 * @func_caps: pointer to function capabilities structure 2922 * 2923 * Read the function capabilities and extract them into the func_caps structure 2924 * for later use. 2925 */ 2926 static int 2927 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2928 { 2929 u32 cap_count = 0; 2930 void *cbuf; 2931 int status; 2932 2933 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2934 if (!cbuf) 2935 return -ENOMEM; 2936 2937 /* Although the driver doesn't know the number of capabilities the 2938 * device will return, we can simply send a 4KB buffer, the maximum 2939 * possible size that firmware can return. 2940 */ 2941 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2942 2943 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2944 ice_aqc_opc_list_func_caps, NULL); 2945 if (!status) 2946 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2947 kfree(cbuf); 2948 2949 return status; 2950 } 2951 2952 /** 2953 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2954 * @hw: pointer to the hardware structure 2955 */ 2956 void ice_set_safe_mode_caps(struct ice_hw *hw) 2957 { 2958 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2959 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2960 struct ice_hw_common_caps cached_caps; 2961 u32 num_funcs; 2962 2963 /* cache some func_caps values that should be restored after memset */ 2964 cached_caps = func_caps->common_cap; 2965 2966 /* unset func capabilities */ 2967 memset(func_caps, 0, sizeof(*func_caps)); 2968 2969 #define ICE_RESTORE_FUNC_CAP(name) \ 2970 func_caps->common_cap.name = cached_caps.name 2971 2972 /* restore cached values */ 2973 ICE_RESTORE_FUNC_CAP(valid_functions); 2974 ICE_RESTORE_FUNC_CAP(txq_first_id); 2975 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2976 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2977 ICE_RESTORE_FUNC_CAP(max_mtu); 2978 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2979 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2980 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2981 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2982 2983 /* one Tx and one Rx queue in safe mode */ 2984 func_caps->common_cap.num_rxq = 1; 2985 func_caps->common_cap.num_txq = 1; 2986 2987 /* two MSIX vectors, one for traffic and one for misc causes */ 2988 func_caps->common_cap.num_msix_vectors = 2; 2989 func_caps->guar_num_vsi = 1; 2990 2991 /* cache some dev_caps values that should be restored after memset */ 2992 cached_caps = dev_caps->common_cap; 2993 num_funcs = dev_caps->num_funcs; 2994 2995 /* unset dev capabilities */ 2996 memset(dev_caps, 0, sizeof(*dev_caps)); 2997 2998 #define ICE_RESTORE_DEV_CAP(name) \ 2999 dev_caps->common_cap.name = cached_caps.name 3000 3001 /* restore cached values */ 3002 ICE_RESTORE_DEV_CAP(valid_functions); 3003 ICE_RESTORE_DEV_CAP(txq_first_id); 3004 ICE_RESTORE_DEV_CAP(rxq_first_id); 3005 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 3006 ICE_RESTORE_DEV_CAP(max_mtu); 3007 ICE_RESTORE_DEV_CAP(nvm_unified_update); 3008 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 3009 
ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 3010 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 3011 dev_caps->num_funcs = num_funcs; 3012 3013 /* one Tx and one Rx queue per function in safe mode */ 3014 dev_caps->common_cap.num_rxq = num_funcs; 3015 dev_caps->common_cap.num_txq = num_funcs; 3016 3017 /* two MSIX vectors per function */ 3018 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 3019 } 3020 3021 /** 3022 * ice_get_caps - get info about the HW 3023 * @hw: pointer to the hardware structure 3024 */ 3025 int ice_get_caps(struct ice_hw *hw) 3026 { 3027 int status; 3028 3029 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3030 if (status) 3031 return status; 3032 3033 return ice_discover_func_caps(hw, &hw->func_caps); 3034 } 3035 3036 /** 3037 * ice_aq_manage_mac_write - manage MAC address write command 3038 * @hw: pointer to the HW struct 3039 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3040 * @flags: flags to control write behavior 3041 * @cd: pointer to command details structure or NULL 3042 * 3043 * This function is used to write MAC address to the NVM (0x0108). 3044 */ 3045 int 3046 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3047 struct ice_sq_cd *cd) 3048 { 3049 struct ice_aqc_manage_mac_write *cmd; 3050 struct ice_aq_desc desc; 3051 3052 cmd = &desc.params.mac_write; 3053 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3054 3055 cmd->flags = flags; 3056 ether_addr_copy(cmd->mac_addr, mac_addr); 3057 3058 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3059 } 3060 3061 /** 3062 * ice_aq_clear_pxe_mode 3063 * @hw: pointer to the HW struct 3064 * 3065 * Tell the firmware that the driver is taking over from PXE (0x0110). 3066 */ 3067 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3068 { 3069 struct ice_aq_desc desc; 3070 3071 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3072 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3073 3074 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3075 } 3076 3077 /** 3078 * ice_clear_pxe_mode - clear pxe operations mode 3079 * @hw: pointer to the HW struct 3080 * 3081 * Make sure all PXE mode settings are cleared, including things 3082 * like descriptor fetch/write-back mode. 3083 */ 3084 void ice_clear_pxe_mode(struct ice_hw *hw) 3085 { 3086 if (ice_check_sq_alive(hw, &hw->adminq)) 3087 ice_aq_clear_pxe_mode(hw); 3088 } 3089 3090 /** 3091 * ice_aq_set_port_params - set physical port parameters. 3092 * @pi: pointer to the port info struct 3093 * @double_vlan: if set double VLAN is enabled 3094 * @cd: pointer to command details structure or NULL 3095 * 3096 * Set Physical port parameters (0x0203) 3097 */ 3098 int 3099 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3100 struct ice_sq_cd *cd) 3101 3102 { 3103 struct ice_aqc_set_port_params *cmd; 3104 struct ice_hw *hw = pi->hw; 3105 struct ice_aq_desc desc; 3106 u16 cmd_flags = 0; 3107 3108 cmd = &desc.params.set_port_params; 3109 3110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3111 if (double_vlan) 3112 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3113 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3114 3115 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3116 } 3117 3118 /** 3119 * ice_is_100m_speed_supported 3120 * @hw: pointer to the HW struct 3121 * 3122 * returns true if 100M speeds are supported by the device, 3123 * false otherwise. 
3124 */ 3125 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3126 { 3127 switch (hw->device_id) { 3128 case ICE_DEV_ID_E822C_SGMII: 3129 case ICE_DEV_ID_E822L_SGMII: 3130 case ICE_DEV_ID_E823L_1GBE: 3131 case ICE_DEV_ID_E823C_SGMII: 3132 return true; 3133 default: 3134 return false; 3135 } 3136 } 3137 3138 /** 3139 * ice_get_link_speed_based_on_phy_type - returns link speed 3140 * @phy_type_low: lower part of phy_type 3141 * @phy_type_high: higher part of phy_type 3142 * 3143 * This helper function will convert an entry in PHY type structure 3144 * [phy_type_low, phy_type_high] to its corresponding link speed. 3145 * Note: In the structure of [phy_type_low, phy_type_high], there should 3146 * be one bit set, as this function will convert one PHY type to its 3147 * speed. 3148 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3149 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3150 */ 3151 static u16 3152 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3153 { 3154 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3155 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3156 3157 switch (phy_type_low) { 3158 case ICE_PHY_TYPE_LOW_100BASE_TX: 3159 case ICE_PHY_TYPE_LOW_100M_SGMII: 3160 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3161 break; 3162 case ICE_PHY_TYPE_LOW_1000BASE_T: 3163 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3164 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3165 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3166 case ICE_PHY_TYPE_LOW_1G_SGMII: 3167 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3168 break; 3169 case ICE_PHY_TYPE_LOW_2500BASE_T: 3170 case ICE_PHY_TYPE_LOW_2500BASE_X: 3171 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3172 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3173 break; 3174 case ICE_PHY_TYPE_LOW_5GBASE_T: 3175 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3176 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3177 break; 3178 case ICE_PHY_TYPE_LOW_10GBASE_T: 3179 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3180 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3181 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3182 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3183 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3184 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3185 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3186 break; 3187 case ICE_PHY_TYPE_LOW_25GBASE_T: 3188 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3189 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3190 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3191 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3192 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3193 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3194 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3195 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3196 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3197 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3198 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3199 break; 3200 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3201 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3202 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3203 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3204 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3205 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3206 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3207 break; 3208 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3209 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3210 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3211 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3212 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3213 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3214 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3215 case ICE_PHY_TYPE_LOW_50G_AUI2: 3216 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3217 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3218 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3219 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3220 
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: the link_speeds_bitmap encoding matches
 * ice_aqc_get_link_status->link_speed. The caller may pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each bit in the [phy_type_low, phy_type_high] pair represents a certain
 * link speed. This helper function turns on the bits in
 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
 * link_speeds_bitmap input parameter.
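 *
 * Example (illustrative only): to advertise every 10G and 25G PHY type:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * after which the two masks can be copied (as little endian) into a
 * struct ice_aqc_set_phy_cfg_data for ice_aq_set_phy_cfg().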
3285 */ 3286 void 3287 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3288 u16 link_speeds_bitmap) 3289 { 3290 u64 pt_high; 3291 u64 pt_low; 3292 int index; 3293 u16 speed; 3294 3295 /* We first check with low part of phy_type */ 3296 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3297 pt_low = BIT_ULL(index); 3298 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3299 3300 if (link_speeds_bitmap & speed) 3301 *phy_type_low |= BIT_ULL(index); 3302 } 3303 3304 /* We then check with high part of phy_type */ 3305 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3306 pt_high = BIT_ULL(index); 3307 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3308 3309 if (link_speeds_bitmap & speed) 3310 *phy_type_high |= BIT_ULL(index); 3311 } 3312 } 3313 3314 /** 3315 * ice_aq_set_phy_cfg 3316 * @hw: pointer to the HW struct 3317 * @pi: port info structure of the interested logical port 3318 * @cfg: structure with PHY configuration data to be set 3319 * @cd: pointer to command details structure or NULL 3320 * 3321 * Set the various PHY configuration parameters supported on the Port. 3322 * One or more of the Set PHY config parameters may be ignored in an MFP 3323 * mode as the PF may not have the privilege to set some of the PHY Config 3324 * parameters. This status will be indicated by the command response (0x0601). 3325 */ 3326 int 3327 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3328 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3329 { 3330 struct ice_aq_desc desc; 3331 int status; 3332 3333 if (!cfg) 3334 return -EINVAL; 3335 3336 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3337 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3338 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3339 cfg->caps); 3340 3341 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3342 } 3343 3344 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3345 desc.params.set_phy.lport_num = pi->lport; 3346 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3347 3348 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3349 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3350 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3351 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3352 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3353 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3354 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3355 cfg->low_power_ctrl_an); 3356 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3357 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3358 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3359 cfg->link_fec_opt); 3360 3361 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3362 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3363 status = 0; 3364 3365 if (!status) 3366 pi->phy.curr_user_phy_cfg = *cfg; 3367 3368 return status; 3369 } 3370 3371 /** 3372 * ice_update_link_info - update status of the HW network link 3373 * @pi: port info structure of the interested logical port 3374 */ 3375 int ice_update_link_info(struct ice_port_info *pi) 3376 { 3377 struct ice_link_status *li; 3378 int status; 3379 3380 if (!pi) 3381 return -EINVAL; 3382 3383 li = &pi->phy.link_info; 3384 3385 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3386 if (status) 3387 return status; 3388 3389 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3390 struct ice_aqc_get_phy_caps_data 
*pcaps; 3391 struct ice_hw *hw; 3392 3393 hw = pi->hw; 3394 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3395 GFP_KERNEL); 3396 if (!pcaps) 3397 return -ENOMEM; 3398 3399 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3400 pcaps, NULL); 3401 3402 devm_kfree(ice_hw_to_dev(hw), pcaps); 3403 } 3404 3405 return status; 3406 } 3407 3408 /** 3409 * ice_cache_phy_user_req 3410 * @pi: port information structure 3411 * @cache_data: PHY logging data 3412 * @cache_mode: PHY logging mode 3413 * 3414 * Log the user request on (FC, FEC, SPEED) for later use. 3415 */ 3416 static void 3417 ice_cache_phy_user_req(struct ice_port_info *pi, 3418 struct ice_phy_cache_mode_data cache_data, 3419 enum ice_phy_cache_mode cache_mode) 3420 { 3421 if (!pi) 3422 return; 3423 3424 switch (cache_mode) { 3425 case ICE_FC_MODE: 3426 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3427 break; 3428 case ICE_SPEED_MODE: 3429 pi->phy.curr_user_speed_req = 3430 cache_data.data.curr_user_speed_req; 3431 break; 3432 case ICE_FEC_MODE: 3433 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3434 break; 3435 default: 3436 break; 3437 } 3438 } 3439 3440 /** 3441 * ice_caps_to_fc_mode 3442 * @caps: PHY capabilities 3443 * 3444 * Convert PHY FC capabilities to ice FC mode 3445 */ 3446 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3447 { 3448 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3449 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3450 return ICE_FC_FULL; 3451 3452 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3453 return ICE_FC_TX_PAUSE; 3454 3455 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3456 return ICE_FC_RX_PAUSE; 3457 3458 return ICE_FC_NONE; 3459 } 3460 3461 /** 3462 * ice_caps_to_fec_mode 3463 * @caps: PHY capabilities 3464 * @fec_options: Link FEC options 3465 * 3466 * Convert PHY FEC capabilities to ice FEC mode 3467 */ 3468 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3469 { 3470 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3471 return ICE_FEC_AUTO; 3472 3473 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3474 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3475 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3476 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3477 return ICE_FEC_BASER; 3478 3479 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3480 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3481 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3482 return ICE_FEC_RS; 3483 3484 return ICE_FEC_NONE; 3485 } 3486 3487 /** 3488 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3489 * @pi: port information structure 3490 * @cfg: PHY configuration data to set FC mode 3491 * @req_mode: FC mode to configure 3492 */ 3493 int 3494 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3495 enum ice_fc_mode req_mode) 3496 { 3497 struct ice_phy_cache_mode_data cache_data; 3498 u8 pause_mask = 0x0; 3499 3500 if (!pi || !cfg) 3501 return -EINVAL; 3502 3503 switch (req_mode) { 3504 case ICE_FC_FULL: 3505 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3506 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3507 break; 3508 case ICE_FC_RX_PAUSE: 3509 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3510 break; 3511 case ICE_FC_TX_PAUSE: 3512 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3513 break; 3514 default: 3515 break; 3516 } 3517 3518 /* clear the old pause settings */ 3519 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3520 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3521 3522 /* set the new capabilities */ 3523 cfg->caps |= pause_mask; 3524 3525 /* Cache user FC request */ 3526 cache_data.data.curr_user_fc_req = req_mode; 
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration.
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
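	 * (The AN mode and module qualification bits exist only on the
	 * capabilities side, and the automatic link update bit exists only
	 * on the configuration side.)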
3629 */ 3630 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3631 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3632 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3633 3634 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3635 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3636 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3637 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3638 phy_caps->eee_cap != phy_cfg->eee_cap || 3639 phy_caps->eeer_value != phy_cfg->eeer_value || 3640 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3641 return false; 3642 3643 return true; 3644 } 3645 3646 /** 3647 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3648 * @pi: port information structure 3649 * @caps: PHY ability structure to copy date from 3650 * @cfg: PHY configuration structure to copy data to 3651 * 3652 * Helper function to copy AQC PHY get ability data to PHY set configuration 3653 * data structure 3654 */ 3655 void 3656 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3657 struct ice_aqc_get_phy_caps_data *caps, 3658 struct ice_aqc_set_phy_cfg_data *cfg) 3659 { 3660 if (!pi || !caps || !cfg) 3661 return; 3662 3663 memset(cfg, 0, sizeof(*cfg)); 3664 cfg->phy_type_low = caps->phy_type_low; 3665 cfg->phy_type_high = caps->phy_type_high; 3666 cfg->caps = caps->caps; 3667 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3668 cfg->eee_cap = caps->eee_cap; 3669 cfg->eeer_value = caps->eeer_value; 3670 cfg->link_fec_opt = caps->link_fec_options; 3671 cfg->module_compliance_enforcement = 3672 caps->module_compliance_enforcement; 3673 } 3674 3675 /** 3676 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3677 * @pi: port information structure 3678 * @cfg: PHY configuration data to set FEC mode 3679 * @fec: FEC mode to configure 3680 */ 3681 int 3682 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3683 enum ice_fec_mode fec) 3684 { 3685 struct ice_aqc_get_phy_caps_data *pcaps; 3686 struct ice_hw *hw; 3687 int status; 3688 3689 if (!pi || !cfg) 3690 return -EINVAL; 3691 3692 hw = pi->hw; 3693 3694 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3695 if (!pcaps) 3696 return -ENOMEM; 3697 3698 status = ice_aq_get_phy_caps(pi, false, 3699 (ice_fw_supports_report_dflt_cfg(hw) ? 3700 ICE_AQC_REPORT_DFLT_CFG : 3701 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3702 if (status) 3703 goto out; 3704 3705 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3706 cfg->link_fec_opt = pcaps->link_fec_options; 3707 3708 switch (fec) { 3709 case ICE_FEC_BASER: 3710 /* Clear RS bits, and AND BASE-R ability 3711 * bits and OR request bits. 3712 */ 3713 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3714 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3715 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3716 ICE_AQC_PHY_FEC_25G_KR_REQ; 3717 break; 3718 case ICE_FEC_RS: 3719 /* Clear BASE-R bits, and AND RS ability 3720 * bits and OR request bits. 3721 */ 3722 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3723 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3724 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3725 break; 3726 case ICE_FEC_NONE: 3727 /* Clear all FEC option bits. */ 3728 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3729 break; 3730 case ICE_FEC_AUTO: 3731 /* AND auto FEC bit, and all caps bits. 
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
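 *
 * A minimal usage sketch (illustrative only, error handling elided):
 *
 *   err = ice_aq_set_link_restart_an(pi, true, NULL);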
3798 */ 3799 int 3800 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3801 struct ice_sq_cd *cd) 3802 { 3803 struct ice_aqc_restart_an *cmd; 3804 struct ice_aq_desc desc; 3805 3806 cmd = &desc.params.restart_an; 3807 3808 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3809 3810 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3811 cmd->lport_num = pi->lport; 3812 if (ena_link) 3813 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3814 else 3815 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3816 3817 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3818 } 3819 3820 /** 3821 * ice_aq_set_event_mask 3822 * @hw: pointer to the HW struct 3823 * @port_num: port number of the physical function 3824 * @mask: event mask to be set 3825 * @cd: pointer to command details structure or NULL 3826 * 3827 * Set event mask (0x0613) 3828 */ 3829 int 3830 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3831 struct ice_sq_cd *cd) 3832 { 3833 struct ice_aqc_set_event_mask *cmd; 3834 struct ice_aq_desc desc; 3835 3836 cmd = &desc.params.set_event_mask; 3837 3838 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3839 3840 cmd->lport_num = port_num; 3841 3842 cmd->event_mask = cpu_to_le16(mask); 3843 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3844 } 3845 3846 /** 3847 * ice_aq_set_mac_loopback 3848 * @hw: pointer to the HW struct 3849 * @ena_lpbk: Enable or Disable loopback 3850 * @cd: pointer to command details structure or NULL 3851 * 3852 * Enable/disable loopback on a given port 3853 */ 3854 int 3855 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3856 { 3857 struct ice_aqc_set_mac_lb *cmd; 3858 struct ice_aq_desc desc; 3859 3860 cmd = &desc.params.set_mac_lb; 3861 3862 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3863 if (ena_lpbk) 3864 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3865 3866 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3867 } 3868 3869 /** 3870 * ice_aq_set_port_id_led 3871 * @pi: pointer to the port information 3872 * @is_orig_mode: is this LED set to original mode (by the net-list) 3873 * @cd: pointer to command details structure or NULL 3874 * 3875 * Set LED value for the given port (0x06e9) 3876 */ 3877 int 3878 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3879 struct ice_sq_cd *cd) 3880 { 3881 struct ice_aqc_set_port_id_led *cmd; 3882 struct ice_hw *hw = pi->hw; 3883 struct ice_aq_desc desc; 3884 3885 cmd = &desc.params.set_port_id_led; 3886 3887 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3888 3889 if (is_orig_mode) 3890 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3891 else 3892 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3893 3894 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3895 } 3896 3897 /** 3898 * ice_aq_get_port_options 3899 * @hw: pointer to the HW struct 3900 * @options: buffer for the resultant port options 3901 * @option_count: input - size of the buffer in port options structures, 3902 * output - number of returned port options 3903 * @lport: logical port to call the command with (optional) 3904 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3905 * when PF owns more than 1 port it must be true 3906 * @active_option_idx: index of active port option in returned buffer 3907 * @active_option_valid: active option in returned buffer is valid 3908 * @pending_option_idx: index of pending port option in returned buffer 3909 * @pending_option_valid: pending option in returned buffer 
is valid 3910 * 3911 * Calls Get Port Options AQC (0x06ea) and verifies result. 3912 */ 3913 int 3914 ice_aq_get_port_options(struct ice_hw *hw, 3915 struct ice_aqc_get_port_options_elem *options, 3916 u8 *option_count, u8 lport, bool lport_valid, 3917 u8 *active_option_idx, bool *active_option_valid, 3918 u8 *pending_option_idx, bool *pending_option_valid) 3919 { 3920 struct ice_aqc_get_port_options *cmd; 3921 struct ice_aq_desc desc; 3922 int status; 3923 u8 i; 3924 3925 /* options buffer shall be able to hold max returned options */ 3926 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3927 return -EINVAL; 3928 3929 cmd = &desc.params.get_port_options; 3930 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3931 3932 if (lport_valid) 3933 cmd->lport_num = lport; 3934 cmd->lport_num_valid = lport_valid; 3935 3936 status = ice_aq_send_cmd(hw, &desc, options, 3937 *option_count * sizeof(*options), NULL); 3938 if (status) 3939 return status; 3940 3941 /* verify direct FW response & set output parameters */ 3942 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 3943 cmd->port_options_count); 3944 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 3945 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 3946 cmd->port_options); 3947 if (*active_option_valid) { 3948 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 3949 cmd->port_options); 3950 if (*active_option_idx > (*option_count - 1)) 3951 return -EIO; 3952 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 3953 *active_option_idx); 3954 } 3955 3956 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 3957 cmd->pending_port_option_status); 3958 if (*pending_option_valid) { 3959 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 3960 cmd->pending_port_option_status); 3961 if (*pending_option_idx > (*option_count - 1)) 3962 return -EIO; 3963 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 3964 *pending_option_idx); 3965 } 3966 3967 /* mask output options fields */ 3968 for (i = 0; i < *option_count; i++) { 3969 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 3970 options[i].pmd); 3971 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 3972 options[i].max_lane_speed); 3973 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 3974 options[i].pmd, options[i].max_lane_speed); 3975 } 3976 3977 return 0; 3978 } 3979 3980 /** 3981 * ice_aq_set_port_option 3982 * @hw: pointer to the HW struct 3983 * @lport: logical port to call the command with 3984 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3985 * when PF owns more than 1 port it must be true 3986 * @new_option: new port option to be written 3987 * 3988 * Calls Set Port Options AQC (0x06eb). 
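 *
 * A minimal sketch of selecting option index 2 on the PF-owned port
 * (illustrative only; real callers validate the returned count and
 * indexes as the get routine above does):
 *
 *   struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_MAX] = {};
 *   u8 cnt = ICE_AQC_PORT_OPT_MAX, active, pending;
 *   bool active_valid, pending_valid;
 *
 *   err = ice_aq_get_port_options(hw, opts, &cnt, 0, false, &active,
 *                                 &active_valid, &pending, &pending_valid);
 *   if (!err)
 *           err = ice_aq_set_port_option(hw, 0, false, 2);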
3989 */ 3990 int 3991 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 3992 u8 new_option) 3993 { 3994 struct ice_aqc_set_port_option *cmd; 3995 struct ice_aq_desc desc; 3996 3997 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 3998 return -EINVAL; 3999 4000 cmd = &desc.params.set_port_option; 4001 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 4002 4003 if (lport_valid) 4004 cmd->lport_num = lport; 4005 4006 cmd->lport_num_valid = lport_valid; 4007 cmd->selected_port_option = new_option; 4008 4009 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 4010 } 4011 4012 /** 4013 * ice_aq_sff_eeprom 4014 * @hw: pointer to the HW struct 4015 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 4016 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 4017 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 4018 * @page: QSFP page 4019 * @set_page: set or ignore the page 4020 * @data: pointer to data buffer to be read/written to the I2C device. 4021 * @length: 1-16 for read, 1 for write. 4022 * @write: 0 read, 1 for write. 4023 * @cd: pointer to command details structure or NULL 4024 * 4025 * Read/Write SFF EEPROM (0x06EE) 4026 */ 4027 int 4028 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 4029 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 4030 bool write, struct ice_sq_cd *cd) 4031 { 4032 struct ice_aqc_sff_eeprom *cmd; 4033 struct ice_aq_desc desc; 4034 int status; 4035 4036 if (!data || (mem_addr & 0xff00)) 4037 return -EINVAL; 4038 4039 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 4040 cmd = &desc.params.read_write_sff_param; 4041 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 4042 cmd->lport_num = (u8)(lport & 0xff); 4043 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 4044 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 4045 ICE_AQC_SFF_I2CBUS_7BIT_M) | 4046 ((set_page << 4047 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 4048 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 4049 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 4050 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 4051 if (write) 4052 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 4053 4054 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 4055 return status; 4056 } 4057 4058 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) 4059 { 4060 switch (type) { 4061 case ICE_LUT_VSI: 4062 return ICE_LUT_VSI_SIZE; 4063 case ICE_LUT_GLOBAL: 4064 return ICE_LUT_GLOBAL_SIZE; 4065 case ICE_LUT_PF: 4066 return ICE_LUT_PF_SIZE; 4067 } 4068 WARN_ONCE(1, "incorrect type passed"); 4069 return ICE_LUT_VSI_SIZE; 4070 } 4071 4072 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) 4073 { 4074 switch (size) { 4075 case ICE_LUT_VSI_SIZE: 4076 return ICE_AQC_LUT_SIZE_SMALL; 4077 case ICE_LUT_GLOBAL_SIZE: 4078 return ICE_AQC_LUT_SIZE_512; 4079 case ICE_LUT_PF_SIZE: 4080 return ICE_AQC_LUT_SIZE_2K; 4081 } 4082 WARN_ONCE(1, "incorrect size passed"); 4083 return 0; 4084 } 4085 4086 /** 4087 * __ice_aq_get_set_rss_lut 4088 * @hw: pointer to the hardware structure 4089 * @params: RSS LUT parameters 4090 * @set: set true to set the table, false to get the table 4091 * 4092 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4093 */ 4094 static int 4095 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 4096 struct ice_aq_get_set_rss_lut_params *params, bool set) 4097 { 4098 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 4099 enum 
ice_lut_type lut_type = params->lut_type; 4100 struct ice_aqc_get_set_rss_lut *desc_params; 4101 enum ice_aqc_lut_flags flags; 4102 enum ice_lut_size lut_size; 4103 struct ice_aq_desc desc; 4104 u8 *lut = params->lut; 4105 4106 4107 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4108 return -EINVAL; 4109 4110 lut_size = ice_lut_type_to_size(lut_type); 4111 if (lut_size > params->lut_size) 4112 return -EINVAL; 4113 else if (set && lut_size != params->lut_size) 4114 return -EINVAL; 4115 4116 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4117 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4118 if (set) 4119 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4120 4121 desc_params = &desc.params.get_set_rss_lut; 4122 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4123 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4124 4125 if (lut_type == ICE_LUT_GLOBAL) 4126 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4127 params->global_lut_id); 4128 4129 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4130 desc_params->flags = cpu_to_le16(flags); 4131 4132 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4133 } 4134 4135 /** 4136 * ice_aq_get_rss_lut 4137 * @hw: pointer to the hardware structure 4138 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4139 * 4140 * get the RSS lookup table, PF or VSI type 4141 */ 4142 int 4143 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4144 { 4145 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4146 } 4147 4148 /** 4149 * ice_aq_set_rss_lut 4150 * @hw: pointer to the hardware structure 4151 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4152 * 4153 * set the RSS lookup table, PF or VSI type 4154 */ 4155 int 4156 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4157 { 4158 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4159 } 4160 4161 /** 4162 * __ice_aq_get_set_rss_key 4163 * @hw: pointer to the HW struct 4164 * @vsi_id: VSI FW index 4165 * @key: pointer to key info struct 4166 * @set: set true to set the key, false to get the key 4167 * 4168 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4169 */ 4170 static int 4171 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4172 struct ice_aqc_get_set_rss_keys *key, bool set) 4173 { 4174 struct ice_aqc_get_set_rss_key *desc_params; 4175 u16 key_size = sizeof(*key); 4176 struct ice_aq_desc desc; 4177 4178 if (set) { 4179 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4180 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4181 } else { 4182 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4183 } 4184 4185 desc_params = &desc.params.get_set_rss_key; 4186 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4187 4188 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4189 } 4190 4191 /** 4192 * ice_aq_get_rss_key 4193 * @hw: pointer to the HW struct 4194 * @vsi_handle: software VSI handle 4195 * @key: pointer to key info struct 4196 * 4197 * get the RSS key per VSI 4198 */ 4199 int 4200 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4201 struct ice_aqc_get_set_rss_keys *key) 4202 { 4203 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4204 return -EINVAL; 4205 4206 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4207 key, false); 4208 } 4209 4210 /** 4211 * ice_aq_set_rss_key 4212 * @hw: pointer to the HW struct 4213 * @vsi_handle: software VSI handle 
4214 * @keys: pointer to key info struct 4215 * 4216 * set the RSS key per VSI 4217 */ 4218 int 4219 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4220 struct ice_aqc_get_set_rss_keys *keys) 4221 { 4222 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4223 return -EINVAL; 4224 4225 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4226 keys, true); 4227 } 4228 4229 /** 4230 * ice_aq_add_lan_txq 4231 * @hw: pointer to the hardware structure 4232 * @num_qgrps: Number of added queue groups 4233 * @qg_list: list of queue groups to be added 4234 * @buf_size: size of buffer for indirect command 4235 * @cd: pointer to command details structure or NULL 4236 * 4237 * Add Tx LAN queue (0x0C30) 4238 * 4239 * NOTE: 4240 * Prior to calling add Tx LAN queue: 4241 * Initialize the following as part of the Tx queue context: 4242 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4243 * Cache profile and Packet shaper profile. 4244 * 4245 * After add Tx LAN queue AQ command is completed: 4246 * Interrupts should be associated with specific queues, 4247 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4248 * flow. 4249 */ 4250 static int 4251 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4252 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4253 struct ice_sq_cd *cd) 4254 { 4255 struct ice_aqc_add_tx_qgrp *list; 4256 struct ice_aqc_add_txqs *cmd; 4257 struct ice_aq_desc desc; 4258 u16 i, sum_size = 0; 4259 4260 cmd = &desc.params.add_txqs; 4261 4262 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4263 4264 if (!qg_list) 4265 return -EINVAL; 4266 4267 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4268 return -EINVAL; 4269 4270 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4271 sum_size += struct_size(list, txqs, list->num_txqs); 4272 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4273 list->num_txqs); 4274 } 4275 4276 if (buf_size != sum_size) 4277 return -EINVAL; 4278 4279 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4280 4281 cmd->num_qgrps = num_qgrps; 4282 4283 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4284 } 4285 4286 /** 4287 * ice_aq_dis_lan_txq 4288 * @hw: pointer to the hardware structure 4289 * @num_qgrps: number of groups in the list 4290 * @qg_list: the list of groups to disable 4291 * @buf_size: the total size of the qg_list buffer in bytes 4292 * @rst_src: if called due to reset, specifies the reset source 4293 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4294 * @cd: pointer to command details structure or NULL 4295 * 4296 * Disable LAN Tx queue (0x0C31) 4297 */ 4298 static int 4299 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4300 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4301 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4302 struct ice_sq_cd *cd) 4303 { 4304 struct ice_aqc_dis_txq_item *item; 4305 struct ice_aqc_dis_txqs *cmd; 4306 struct ice_aq_desc desc; 4307 u16 i, sz = 0; 4308 int status; 4309 4310 cmd = &desc.params.dis_txqs; 4311 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4312 4313 /* qg_list can be NULL only in VM/VF reset flow */ 4314 if (!qg_list && !rst_src) 4315 return -EINVAL; 4316 4317 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4318 return -EINVAL; 4319 4320 cmd->num_entries = num_qgrps; 4321 4322 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 4323 ICE_AQC_Q_DIS_TIMEOUT_M); 4324 4325 switch (rst_src) { 4326 case ICE_VM_RESET: 4327 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4328 
cmd->vmvf_and_timeout |= 4329 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 4330 break; 4331 case ICE_VF_RESET: 4332 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4333 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4334 cmd->vmvf_and_timeout |= 4335 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 4336 ICE_AQC_Q_DIS_VMVF_NUM_M); 4337 break; 4338 case ICE_NO_RESET: 4339 default: 4340 break; 4341 } 4342 4343 /* flush pipe on time out */ 4344 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4345 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4346 if (!qg_list) 4347 goto do_aq; 4348 4349 /* set RD bit to indicate that command buffer is provided by the driver 4350 * and it needs to be read by the firmware 4351 */ 4352 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4353 4354 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4355 u16 item_size = struct_size(item, q_id, item->num_qs); 4356 4357 /* If the num of queues is even, add 2 bytes of padding */ 4358 if ((item->num_qs % 2) == 0) 4359 item_size += 2; 4360 4361 sz += item_size; 4362 4363 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4364 } 4365 4366 if (buf_size != sz) 4367 return -EINVAL; 4368 4369 do_aq: 4370 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4371 if (status) { 4372 if (!qg_list) 4373 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4374 vmvf_num, hw->adminq.sq_last_status); 4375 else 4376 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4377 le16_to_cpu(qg_list[0].q_id[0]), 4378 hw->adminq.sq_last_status); 4379 } 4380 return status; 4381 } 4382 4383 /** 4384 * ice_aq_cfg_lan_txq 4385 * @hw: pointer to the hardware structure 4386 * @buf: buffer for command 4387 * @buf_size: size of buffer in bytes 4388 * @num_qs: number of queues being configured 4389 * @oldport: origination lport 4390 * @newport: destination lport 4391 * @cd: pointer to command details structure or NULL 4392 * 4393 * Move/Configure LAN Tx queue (0x0C32) 4394 * 4395 * There is a better AQ command to use for moving nodes, so only coding 4396 * this one for configuring the node. 
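 *
 * Note: only the TC-change variant (ICE_AQC_Q_CFG_TC_CHNG) is issued by
 * this wrapper.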
4397 */ 4398 int 4399 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4400 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4401 struct ice_sq_cd *cd) 4402 { 4403 struct ice_aqc_cfg_txqs *cmd; 4404 struct ice_aq_desc desc; 4405 int status; 4406 4407 cmd = &desc.params.cfg_txqs; 4408 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4409 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4410 4411 if (!buf) 4412 return -EINVAL; 4413 4414 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4415 cmd->num_qs = num_qs; 4416 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4417 cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) & 4418 ICE_AQC_Q_CFG_DST_PRT_M; 4419 cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) & 4420 ICE_AQC_Q_CFG_TIMEOUT_M; 4421 cmd->blocked_cgds = 0; 4422 4423 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4424 if (status) 4425 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4426 hw->adminq.sq_last_status); 4427 return status; 4428 } 4429 4430 /** 4431 * ice_aq_add_rdma_qsets 4432 * @hw: pointer to the hardware structure 4433 * @num_qset_grps: Number of RDMA Qset groups 4434 * @qset_list: list of Qset groups to be added 4435 * @buf_size: size of buffer for indirect command 4436 * @cd: pointer to command details structure or NULL 4437 * 4438 * Add Tx RDMA Qsets (0x0C33) 4439 */ 4440 static int 4441 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4442 struct ice_aqc_add_rdma_qset_data *qset_list, 4443 u16 buf_size, struct ice_sq_cd *cd) 4444 { 4445 struct ice_aqc_add_rdma_qset_data *list; 4446 struct ice_aqc_add_rdma_qset *cmd; 4447 struct ice_aq_desc desc; 4448 u16 i, sum_size = 0; 4449 4450 cmd = &desc.params.add_rdma_qset; 4451 4452 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4453 4454 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4455 return -EINVAL; 4456 4457 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4458 u16 num_qsets = le16_to_cpu(list->num_qsets); 4459 4460 sum_size += struct_size(list, rdma_qsets, num_qsets); 4461 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4462 num_qsets); 4463 } 4464 4465 if (buf_size != sum_size) 4466 return -EINVAL; 4467 4468 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4469 4470 cmd->num_qset_grps = num_qset_grps; 4471 4472 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4473 } 4474 4475 /* End of FW Admin Queue command wrappers */ 4476 4477 /** 4478 * ice_write_byte - write a byte to a packed context structure 4479 * @src_ctx: the context structure to read from 4480 * @dest_ctx: the context to be written to 4481 * @ce_info: a description of the struct to be filled 4482 */ 4483 static void 4484 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4485 { 4486 u8 src_byte, dest_byte, mask; 4487 u8 *from, *dest; 4488 u16 shift_width; 4489 4490 /* copy from the next struct field */ 4491 from = src_ctx + ce_info->offset; 4492 4493 /* prepare the bits and mask */ 4494 shift_width = ce_info->lsb % 8; 4495 mask = (u8)(BIT(ce_info->width) - 1); 4496 4497 src_byte = *from; 4498 src_byte &= mask; 4499 4500 /* shift to correct alignment */ 4501 mask <<= shift_width; 4502 src_byte <<= shift_width; 4503 4504 /* get the current bits from the target bit string */ 4505 dest = dest_ctx + (ce_info->lsb / 8); 4506 4507 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4508 4509 dest_byte &= ~mask; /* get the bits not changing */ 4510 dest_byte |= src_byte; /* add in the new bits */ 4511 4512 /* put it all back */ 4513 
memcpy(dest, &dest_byte, sizeof(dest_byte)); 4514 } 4515 4516 /** 4517 * ice_write_word - write a word to a packed context structure 4518 * @src_ctx: the context structure to read from 4519 * @dest_ctx: the context to be written to 4520 * @ce_info: a description of the struct to be filled 4521 */ 4522 static void 4523 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4524 { 4525 u16 src_word, mask; 4526 __le16 dest_word; 4527 u8 *from, *dest; 4528 u16 shift_width; 4529 4530 /* copy from the next struct field */ 4531 from = src_ctx + ce_info->offset; 4532 4533 /* prepare the bits and mask */ 4534 shift_width = ce_info->lsb % 8; 4535 mask = BIT(ce_info->width) - 1; 4536 4537 /* don't swizzle the bits until after the mask because the mask bits 4538 * will be in a different bit position on big endian machines 4539 */ 4540 src_word = *(u16 *)from; 4541 src_word &= mask; 4542 4543 /* shift to correct alignment */ 4544 mask <<= shift_width; 4545 src_word <<= shift_width; 4546 4547 /* get the current bits from the target bit string */ 4548 dest = dest_ctx + (ce_info->lsb / 8); 4549 4550 memcpy(&dest_word, dest, sizeof(dest_word)); 4551 4552 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4553 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4554 4555 /* put it all back */ 4556 memcpy(dest, &dest_word, sizeof(dest_word)); 4557 } 4558 4559 /** 4560 * ice_write_dword - write a dword to a packed context structure 4561 * @src_ctx: the context structure to read from 4562 * @dest_ctx: the context to be written to 4563 * @ce_info: a description of the struct to be filled 4564 */ 4565 static void 4566 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4567 { 4568 u32 src_dword, mask; 4569 __le32 dest_dword; 4570 u8 *from, *dest; 4571 u16 shift_width; 4572 4573 /* copy from the next struct field */ 4574 from = src_ctx + ce_info->offset; 4575 4576 /* prepare the bits and mask */ 4577 shift_width = ce_info->lsb % 8; 4578 4579 /* if the field width is exactly 32 on an x86 machine, then the shift 4580 * operation will not work because the SHL instructions count is masked 4581 * to 5 bits so the shift will do nothing 4582 */ 4583 if (ce_info->width < 32) 4584 mask = BIT(ce_info->width) - 1; 4585 else 4586 mask = (u32)~0; 4587 4588 /* don't swizzle the bits until after the mask because the mask bits 4589 * will be in a different bit position on big endian machines 4590 */ 4591 src_dword = *(u32 *)from; 4592 src_dword &= mask; 4593 4594 /* shift to correct alignment */ 4595 mask <<= shift_width; 4596 src_dword <<= shift_width; 4597 4598 /* get the current bits from the target bit string */ 4599 dest = dest_ctx + (ce_info->lsb / 8); 4600 4601 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4602 4603 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4604 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4605 4606 /* put it all back */ 4607 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4608 } 4609 4610 /** 4611 * ice_write_qword - write a qword to a packed context structure 4612 * @src_ctx: the context structure to read from 4613 * @dest_ctx: the context to be written to 4614 * @ce_info: a description of the struct to be filled 4615 */ 4616 static void 4617 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4618 { 4619 u64 src_qword, mask; 4620 __le64 dest_qword; 4621 u8 *from, *dest; 4622 u16 shift_width; 4623 4624 /* copy from the next struct field */ 4625 from = src_ctx + 
ce_info->offset; 4626 4627 /* prepare the bits and mask */ 4628 shift_width = ce_info->lsb % 8; 4629 4630 /* if the field width is exactly 64 on an x86 machine, then the shift 4631 * operation will not work because the SHL instructions count is masked 4632 * to 6 bits so the shift will do nothing 4633 */ 4634 if (ce_info->width < 64) 4635 mask = BIT_ULL(ce_info->width) - 1; 4636 else 4637 mask = (u64)~0; 4638 4639 /* don't swizzle the bits until after the mask because the mask bits 4640 * will be in a different bit position on big endian machines 4641 */ 4642 src_qword = *(u64 *)from; 4643 src_qword &= mask; 4644 4645 /* shift to correct alignment */ 4646 mask <<= shift_width; 4647 src_qword <<= shift_width; 4648 4649 /* get the current bits from the target bit string */ 4650 dest = dest_ctx + (ce_info->lsb / 8); 4651 4652 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4653 4654 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4655 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4656 4657 /* put it all back */ 4658 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4659 } 4660 4661 /** 4662 * ice_set_ctx - set context bits in packed structure 4663 * @hw: pointer to the hardware structure 4664 * @src_ctx: pointer to a generic non-packed context structure 4665 * @dest_ctx: pointer to memory for the packed structure 4666 * @ce_info: a description of the structure to be transformed 4667 */ 4668 int 4669 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4670 const struct ice_ctx_ele *ce_info) 4671 { 4672 int f; 4673 4674 for (f = 0; ce_info[f].width; f++) { 4675 /* We have to deal with each element of the FW response 4676 * using the correct size so that we are correct regardless 4677 * of the endianness of the machine. 4678 */ 4679 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4680 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", 4681 f, ce_info[f].width, ce_info[f].size_of); 4682 continue; 4683 } 4684 switch (ce_info[f].size_of) { 4685 case sizeof(u8): 4686 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4687 break; 4688 case sizeof(u16): 4689 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4690 break; 4691 case sizeof(u32): 4692 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4693 break; 4694 case sizeof(u64): 4695 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4696 break; 4697 default: 4698 return -EINVAL; 4699 } 4700 } 4701 4702 return 0; 4703 } 4704 4705 /** 4706 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4707 * @hw: pointer to the HW struct 4708 * @vsi_handle: software VSI handle 4709 * @tc: TC number 4710 * @q_handle: software queue handle 4711 */ 4712 struct ice_q_ctx * 4713 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4714 { 4715 struct ice_vsi_ctx *vsi; 4716 struct ice_q_ctx *q_ctx; 4717 4718 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4719 if (!vsi) 4720 return NULL; 4721 if (q_handle >= vsi->num_lan_q_entries[tc]) 4722 return NULL; 4723 if (!vsi->lan_q_ctx[tc]) 4724 return NULL; 4725 q_ctx = vsi->lan_q_ctx[tc]; 4726 return &q_ctx[q_handle]; 4727 } 4728 4729 /** 4730 * ice_ena_vsi_txq 4731 * @pi: port information structure 4732 * @vsi_handle: software VSI handle 4733 * @tc: TC number 4734 * @q_handle: software queue handle 4735 * @num_qgrps: Number of added queue groups 4736 * @buf: list of queue groups to be added 4737 * @buf_size: size of buffer for indirect command 4738 * @cd: pointer to command details structure or NULL 4739 * 4740 * This function adds one LAN queue 4741 */ 4742 int 4743 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4744 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4745 struct ice_sq_cd *cd) 4746 { 4747 struct ice_aqc_txsched_elem_data node = { 0 }; 4748 struct ice_sched_node *parent; 4749 struct ice_q_ctx *q_ctx; 4750 struct ice_hw *hw; 4751 int status; 4752 4753 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4754 return -EIO; 4755 4756 if (num_qgrps > 1 || buf->num_txqs > 1) 4757 return -ENOSPC; 4758 4759 hw = pi->hw; 4760 4761 if (!ice_is_vsi_valid(hw, vsi_handle)) 4762 return -EINVAL; 4763 4764 mutex_lock(&pi->sched_lock); 4765 4766 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4767 if (!q_ctx) { 4768 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4769 q_handle); 4770 status = -EINVAL; 4771 goto ena_txq_exit; 4772 } 4773 4774 /* find a parent node */ 4775 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4776 ICE_SCHED_NODE_OWNER_LAN); 4777 if (!parent) { 4778 status = -EINVAL; 4779 goto ena_txq_exit; 4780 } 4781 4782 buf->parent_teid = parent->info.node_teid; 4783 node.parent_teid = parent->info.node_teid; 4784 /* Mark that the values in the "generic" section as valid. The default 4785 * value in the "generic" section is zero. This means that : 4786 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4787 * - 0 priority among siblings, indicated by Bit 1-3. 4788 * - WFQ, indicated by Bit 4. 4789 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4790 * Bit 5-6. 4791 * - Bit 7 is reserved. 4792 * Without setting the generic section as valid in valid_sections, the 4793 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
4794 */ 4795 buf->txqs[0].info.valid_sections = 4796 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4797 ICE_AQC_ELEM_VALID_EIR; 4798 buf->txqs[0].info.generic = 0; 4799 buf->txqs[0].info.cir_bw.bw_profile_idx = 4800 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4801 buf->txqs[0].info.cir_bw.bw_alloc = 4802 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4803 buf->txqs[0].info.eir_bw.bw_profile_idx = 4804 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4805 buf->txqs[0].info.eir_bw.bw_alloc = 4806 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4807 4808 /* add the LAN queue */ 4809 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4810 if (status) { 4811 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4812 le16_to_cpu(buf->txqs[0].txq_id), 4813 hw->adminq.sq_last_status); 4814 goto ena_txq_exit; 4815 } 4816 4817 node.node_teid = buf->txqs[0].q_teid; 4818 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4819 q_ctx->q_handle = q_handle; 4820 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4821 4822 /* add a leaf node into scheduler tree queue layer */ 4823 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4824 if (!status) 4825 status = ice_sched_replay_q_bw(pi, q_ctx); 4826 4827 ena_txq_exit: 4828 mutex_unlock(&pi->sched_lock); 4829 return status; 4830 } 4831 4832 /** 4833 * ice_dis_vsi_txq 4834 * @pi: port information structure 4835 * @vsi_handle: software VSI handle 4836 * @tc: TC number 4837 * @num_queues: number of queues 4838 * @q_handles: pointer to software queue handle array 4839 * @q_ids: pointer to the q_id array 4840 * @q_teids: pointer to queue node teids 4841 * @rst_src: if called due to reset, specifies the reset source 4842 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4843 * @cd: pointer to command details structure or NULL 4844 * 4845 * This function removes queues and their corresponding nodes in SW DB 4846 */ 4847 int 4848 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4849 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4850 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4851 struct ice_sq_cd *cd) 4852 { 4853 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4854 u16 i, buf_size = __struct_size(qg_list); 4855 struct ice_q_ctx *q_ctx; 4856 int status = -ENOENT; 4857 struct ice_hw *hw; 4858 4859 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4860 return -EIO; 4861 4862 hw = pi->hw; 4863 4864 if (!num_queues) { 4865 /* if queue is disabled already yet the disable queue command 4866 * has to be sent to complete the VF reset, then call 4867 * ice_aq_dis_lan_txq without any queue information 4868 */ 4869 if (rst_src) 4870 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4871 vmvf_num, NULL); 4872 return -EIO; 4873 } 4874 4875 mutex_lock(&pi->sched_lock); 4876 4877 for (i = 0; i < num_queues; i++) { 4878 struct ice_sched_node *node; 4879 4880 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4881 if (!node) 4882 continue; 4883 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4884 if (!q_ctx) { 4885 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4886 q_handles[i]); 4887 continue; 4888 } 4889 if (q_ctx->q_handle != q_handles[i]) { 4890 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4891 q_ctx->q_handle, q_handles[i]); 4892 continue; 4893 } 4894 qg_list->parent_teid = node->info.parent_teid; 4895 qg_list->num_qs = 1; 4896 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4897 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4898 vmvf_num, cd); 4899 4900 if 
(status) 4901 break; 4902 ice_free_sched_node(pi, node); 4903 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4904 q_ctx->q_teid = ICE_INVAL_TEID; 4905 } 4906 mutex_unlock(&pi->sched_lock); 4907 return status; 4908 } 4909 4910 /** 4911 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4912 * @pi: port information structure 4913 * @vsi_handle: software VSI handle 4914 * @tc_bitmap: TC bitmap 4915 * @maxqs: max queues array per TC 4916 * @owner: LAN or RDMA 4917 * 4918 * This function adds/updates the VSI queues per TC. 4919 */ 4920 static int 4921 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4922 u16 *maxqs, u8 owner) 4923 { 4924 int status = 0; 4925 u8 i; 4926 4927 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4928 return -EIO; 4929 4930 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4931 return -EINVAL; 4932 4933 mutex_lock(&pi->sched_lock); 4934 4935 ice_for_each_traffic_class(i) { 4936 /* configuration is possible only if TC node is present */ 4937 if (!ice_sched_get_tc_node(pi, i)) 4938 continue; 4939 4940 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4941 ice_is_tc_ena(tc_bitmap, i)); 4942 if (status) 4943 break; 4944 } 4945 4946 mutex_unlock(&pi->sched_lock); 4947 return status; 4948 } 4949 4950 /** 4951 * ice_cfg_vsi_lan - configure VSI LAN queues 4952 * @pi: port information structure 4953 * @vsi_handle: software VSI handle 4954 * @tc_bitmap: TC bitmap 4955 * @max_lanqs: max LAN queues array per TC 4956 * 4957 * This function adds/updates the VSI LAN queues per TC. 4958 */ 4959 int 4960 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4961 u16 *max_lanqs) 4962 { 4963 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4964 ICE_SCHED_NODE_OWNER_LAN); 4965 } 4966 4967 /** 4968 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4969 * @pi: port information structure 4970 * @vsi_handle: software VSI handle 4971 * @tc_bitmap: TC bitmap 4972 * @max_rdmaqs: max RDMA queues array per TC 4973 * 4974 * This function adds/updates the VSI RDMA queues per TC. 
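 *
 * A minimal sketch (hypothetical sizing; four RDMA qsets on TC 0 only):
 *
 *   u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 4 };
 *
 *   err = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);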
4975 */ 4976 int 4977 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4978 u16 *max_rdmaqs) 4979 { 4980 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4981 ICE_SCHED_NODE_OWNER_RDMA); 4982 } 4983 4984 /** 4985 * ice_ena_vsi_rdma_qset 4986 * @pi: port information structure 4987 * @vsi_handle: software VSI handle 4988 * @tc: TC number 4989 * @rdma_qset: pointer to RDMA Qset 4990 * @num_qsets: number of RDMA Qsets 4991 * @qset_teid: pointer to Qset node TEIDs 4992 * 4993 * This function adds RDMA Qset 4994 */ 4995 int 4996 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4997 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4998 { 4999 struct ice_aqc_txsched_elem_data node = { 0 }; 5000 struct ice_aqc_add_rdma_qset_data *buf; 5001 struct ice_sched_node *parent; 5002 struct ice_hw *hw; 5003 u16 i, buf_size; 5004 int ret; 5005 5006 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 5007 return -EIO; 5008 hw = pi->hw; 5009 5010 if (!ice_is_vsi_valid(hw, vsi_handle)) 5011 return -EINVAL; 5012 5013 buf_size = struct_size(buf, rdma_qsets, num_qsets); 5014 buf = kzalloc(buf_size, GFP_KERNEL); 5015 if (!buf) 5016 return -ENOMEM; 5017 mutex_lock(&pi->sched_lock); 5018 5019 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 5020 ICE_SCHED_NODE_OWNER_RDMA); 5021 if (!parent) { 5022 ret = -EINVAL; 5023 goto rdma_error_exit; 5024 } 5025 buf->parent_teid = parent->info.node_teid; 5026 node.parent_teid = parent->info.node_teid; 5027 5028 buf->num_qsets = cpu_to_le16(num_qsets); 5029 for (i = 0; i < num_qsets; i++) { 5030 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 5031 buf->rdma_qsets[i].info.valid_sections = 5032 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 5033 ICE_AQC_ELEM_VALID_EIR; 5034 buf->rdma_qsets[i].info.generic = 0; 5035 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 5036 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 5037 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 5038 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 5039 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 5040 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 5041 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 5042 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 5043 } 5044 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 5045 if (ret) { 5046 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 5047 goto rdma_error_exit; 5048 } 5049 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 5050 for (i = 0; i < num_qsets; i++) { 5051 node.node_teid = buf->rdma_qsets[i].qset_teid; 5052 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 5053 &node, NULL); 5054 if (ret) 5055 break; 5056 qset_teid[i] = le32_to_cpu(node.node_teid); 5057 } 5058 rdma_error_exit: 5059 mutex_unlock(&pi->sched_lock); 5060 kfree(buf); 5061 return ret; 5062 } 5063 5064 /** 5065 * ice_dis_vsi_rdma_qset - free RDMA resources 5066 * @pi: port_info struct 5067 * @count: number of RDMA Qsets to free 5068 * @qset_teid: TEID of Qset node 5069 * @q_id: list of queue IDs being disabled 5070 */ 5071 int 5072 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 5073 u16 *q_id) 5074 { 5075 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 5076 u16 qg_size = __struct_size(qg_list); 5077 struct ice_hw *hw; 5078 int status = 0; 5079 int i; 5080 5081 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 5082 return -EIO; 5083 5084 hw = pi->hw; 5085 5086 mutex_lock(&pi->sched_lock); 5087 5088 for (i = 0; i < count; i++) { 5089 struct ice_sched_node *node; 5090 5091 node = 
ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 5092 if (!node) 5093 continue; 5094 5095 qg_list->parent_teid = node->info.parent_teid; 5096 qg_list->num_qs = 1; 5097 qg_list->q_id[0] = 5098 cpu_to_le16(q_id[i] | 5099 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 5100 5101 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 5102 ICE_NO_RESET, 0, NULL); 5103 if (status) 5104 break; 5105 5106 ice_free_sched_node(pi, node); 5107 } 5108 5109 mutex_unlock(&pi->sched_lock); 5110 return status; 5111 } 5112 5113 /** 5114 * ice_aq_get_cgu_abilities - get cgu abilities 5115 * @hw: pointer to the HW struct 5116 * @abilities: CGU abilities 5117 * 5118 * Get CGU abilities (0x0C61) 5119 * Return: 0 on success or negative value on failure. 5120 */ 5121 int 5122 ice_aq_get_cgu_abilities(struct ice_hw *hw, 5123 struct ice_aqc_get_cgu_abilities *abilities) 5124 { 5125 struct ice_aq_desc desc; 5126 5127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 5128 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 5129 } 5130 5131 /** 5132 * ice_aq_set_input_pin_cfg - set input pin config 5133 * @hw: pointer to the HW struct 5134 * @input_idx: Input index 5135 * @flags1: Input flags 5136 * @flags2: Input flags 5137 * @freq: Frequency in Hz 5138 * @phase_delay: Delay in ps 5139 * 5140 * Set CGU input config (0x0C62) 5141 * Return: 0 on success or negative value on failure. 5142 */ 5143 int 5144 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 5145 u32 freq, s32 phase_delay) 5146 { 5147 struct ice_aqc_set_cgu_input_config *cmd; 5148 struct ice_aq_desc desc; 5149 5150 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 5151 cmd = &desc.params.set_cgu_input_config; 5152 cmd->input_idx = input_idx; 5153 cmd->flags1 = flags1; 5154 cmd->flags2 = flags2; 5155 cmd->freq = cpu_to_le32(freq); 5156 cmd->phase_delay = cpu_to_le32(phase_delay); 5157 5158 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5159 } 5160 5161 /** 5162 * ice_aq_get_input_pin_cfg - get input pin config 5163 * @hw: pointer to the HW struct 5164 * @input_idx: Input index 5165 * @status: Pin status 5166 * @type: Pin type 5167 * @flags1: Input flags 5168 * @flags2: Input flags 5169 * @freq: Frequency in Hz 5170 * @phase_delay: Delay in ps 5171 * 5172 * Get CGU input config (0x0C63) 5173 * Return: 0 on success or negative value on failure. 
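 *
 * Any of the output pointers may be NULL; only non-NULL outputs are
 * written from the firmware response on success.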
5174 */ 5175 int 5176 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5177 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5178 { 5179 struct ice_aqc_get_cgu_input_config *cmd; 5180 struct ice_aq_desc desc; 5181 int ret; 5182 5183 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5184 cmd = &desc.params.get_cgu_input_config; 5185 cmd->input_idx = input_idx; 5186 5187 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5188 if (!ret) { 5189 if (status) 5190 *status = cmd->status; 5191 if (type) 5192 *type = cmd->type; 5193 if (flags1) 5194 *flags1 = cmd->flags1; 5195 if (flags2) 5196 *flags2 = cmd->flags2; 5197 if (freq) 5198 *freq = le32_to_cpu(cmd->freq); 5199 if (phase_delay) 5200 *phase_delay = le32_to_cpu(cmd->phase_delay); 5201 } 5202 5203 return ret; 5204 } 5205 5206 /** 5207 * ice_aq_set_output_pin_cfg - set output pin config 5208 * @hw: pointer to the HW struct 5209 * @output_idx: Output index 5210 * @flags: Output flags 5211 * @src_sel: Index of DPLL block 5212 * @freq: Output frequency 5213 * @phase_delay: Output phase compensation 5214 * 5215 * Set CGU output config (0x0C64) 5216 * Return: 0 on success or negative value on failure. 5217 */ 5218 int 5219 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5220 u8 src_sel, u32 freq, s32 phase_delay) 5221 { 5222 struct ice_aqc_set_cgu_output_config *cmd; 5223 struct ice_aq_desc desc; 5224 5225 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5226 cmd = &desc.params.set_cgu_output_config; 5227 cmd->output_idx = output_idx; 5228 cmd->flags = flags; 5229 cmd->src_sel = src_sel; 5230 cmd->freq = cpu_to_le32(freq); 5231 cmd->phase_delay = cpu_to_le32(phase_delay); 5232 5233 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5234 } 5235 5236 /** 5237 * ice_aq_get_output_pin_cfg - get output pin config 5238 * @hw: pointer to the HW struct 5239 * @output_idx: Output index 5240 * @flags: Output flags 5241 * @src_sel: Internal DPLL source 5242 * @freq: Output frequency 5243 * @src_freq: Source frequency 5244 * 5245 * Get CGU output config (0x0C65) 5246 * Return: 0 on success or negative value on failure. 5247 */ 5248 int 5249 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5250 u8 *src_sel, u32 *freq, u32 *src_freq) 5251 { 5252 struct ice_aqc_get_cgu_output_config *cmd; 5253 struct ice_aq_desc desc; 5254 int ret; 5255 5256 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5257 cmd = &desc.params.get_cgu_output_config; 5258 cmd->output_idx = output_idx; 5259 5260 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5261 if (!ret) { 5262 if (flags) 5263 *flags = cmd->flags; 5264 if (src_sel) 5265 *src_sel = cmd->src_sel; 5266 if (freq) 5267 *freq = le32_to_cpu(cmd->freq); 5268 if (src_freq) 5269 *src_freq = le32_to_cpu(cmd->src_freq); 5270 } 5271 5272 return ret; 5273 } 5274 5275 /** 5276 * ice_aq_get_cgu_dpll_status - get dpll status 5277 * @hw: pointer to the HW struct 5278 * @dpll_num: DPLL index 5279 * @ref_state: Reference clock state 5280 * @config: current DPLL config 5281 * @dpll_state: current DPLL state 5282 * @phase_offset: Phase offset in ns 5283 * @eec_mode: EEC_mode 5284 * 5285 * Get CGU DPLL status (0x0C66) 5286 * Return: 0 on success or negative value on failure. 
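 *
 * The firmware reports the 48-bit phase offset in picoseconds; it is
 * sign-extended and converted so @phase_offset is returned in
 * nanoseconds.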
5287 */ 5288 int 5289 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5290 u8 *dpll_state, u8 *config, s64 *phase_offset, 5291 u8 *eec_mode) 5292 { 5293 struct ice_aqc_get_cgu_dpll_status *cmd; 5294 const s64 nsec_per_psec = 1000LL; 5295 struct ice_aq_desc desc; 5296 int status; 5297 5298 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5299 cmd = &desc.params.get_cgu_dpll_status; 5300 cmd->dpll_num = dpll_num; 5301 5302 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5303 if (!status) { 5304 *ref_state = cmd->ref_state; 5305 *dpll_state = cmd->dpll_state; 5306 *config = cmd->config; 5307 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5308 *phase_offset <<= 32; 5309 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5310 *phase_offset = div64_s64(sign_extend64(*phase_offset, 47), 5311 nsec_per_psec); 5312 *eec_mode = cmd->eec_mode; 5313 } 5314 5315 return status; 5316 } 5317 5318 /** 5319 * ice_aq_set_cgu_dpll_config - set dpll config 5320 * @hw: pointer to the HW struct 5321 * @dpll_num: DPLL index 5322 * @ref_state: Reference clock state 5323 * @config: DPLL config 5324 * @eec_mode: EEC mode 5325 * 5326 * Set CGU DPLL config (0x0C67) 5327 * Return: 0 on success or negative value on failure. 5328 */ 5329 int 5330 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5331 u8 config, u8 eec_mode) 5332 { 5333 struct ice_aqc_set_cgu_dpll_config *cmd; 5334 struct ice_aq_desc desc; 5335 5336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5337 cmd = &desc.params.set_cgu_dpll_config; 5338 cmd->dpll_num = dpll_num; 5339 cmd->ref_state = ref_state; 5340 cmd->config = config; 5341 cmd->eec_mode = eec_mode; 5342 5343 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5344 } 5345 5346 /** 5347 * ice_aq_set_cgu_ref_prio - set input reference priority 5348 * @hw: pointer to the HW struct 5349 * @dpll_num: DPLL index 5350 * @ref_idx: Reference pin index 5351 * @ref_priority: Reference input priority 5352 * 5353 * Set CGU reference priority (0x0C68) 5354 * Return: 0 on success or negative value on failure. 5355 */ 5356 int 5357 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5358 u8 ref_priority) 5359 { 5360 struct ice_aqc_set_cgu_ref_prio *cmd; 5361 struct ice_aq_desc desc; 5362 5363 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5364 cmd = &desc.params.set_cgu_ref_prio; 5365 cmd->dpll_num = dpll_num; 5366 cmd->ref_idx = ref_idx; 5367 cmd->ref_priority = ref_priority; 5368 5369 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5370 } 5371 5372 /** 5373 * ice_aq_get_cgu_ref_prio - get input reference priority 5374 * @hw: pointer to the HW struct 5375 * @dpll_num: DPLL index 5376 * @ref_idx: Reference pin index 5377 * @ref_prio: Reference input priority 5378 * 5379 * Get CGU reference priority (0x0C69) 5380 * Return: 0 on success or negative value on failure. 
5381 */ 5382 int 5383 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5384 u8 *ref_prio) 5385 { 5386 struct ice_aqc_get_cgu_ref_prio *cmd; 5387 struct ice_aq_desc desc; 5388 int status; 5389 5390 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); 5391 cmd = &desc.params.get_cgu_ref_prio; 5392 cmd->dpll_num = dpll_num; 5393 cmd->ref_idx = ref_idx; 5394 5395 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5396 if (!status) 5397 *ref_prio = cmd->ref_priority; 5398 5399 return status; 5400 } 5401 5402 /** 5403 * ice_aq_get_cgu_info - get cgu info 5404 * @hw: pointer to the HW struct 5405 * @cgu_id: CGU ID 5406 * @cgu_cfg_ver: CGU config version 5407 * @cgu_fw_ver: CGU firmware version 5408 * 5409 * Get CGU info (0x0C6A) 5410 * Return: 0 on success or negative value on failure. 5411 */ 5412 int 5413 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, 5414 u32 *cgu_fw_ver) 5415 { 5416 struct ice_aqc_get_cgu_info *cmd; 5417 struct ice_aq_desc desc; 5418 int status; 5419 5420 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); 5421 cmd = &desc.params.get_cgu_info; 5422 5423 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5424 if (!status) { 5425 *cgu_id = le32_to_cpu(cmd->cgu_id); 5426 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); 5427 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); 5428 } 5429 5430 return status; 5431 } 5432 5433 /** 5434 * ice_aq_set_phy_rec_clk_out - set RCLK phy out 5435 * @hw: pointer to the HW struct 5436 * @phy_output: PHY reference clock output pin 5437 * @enable: GPIO state to be applied 5438 * @freq: PHY output frequency 5439 * 5440 * Set phy recovered clock as reference (0x0630) 5441 * Return: 0 on success or negative value on failure. 5442 */ 5443 int 5444 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, 5445 u32 *freq) 5446 { 5447 struct ice_aqc_set_phy_rec_clk_out *cmd; 5448 struct ice_aq_desc desc; 5449 int status; 5450 5451 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); 5452 cmd = &desc.params.set_phy_rec_clk_out; 5453 cmd->phy_output = phy_output; 5454 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; 5455 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; 5456 cmd->freq = cpu_to_le32(*freq); 5457 5458 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5459 if (!status) 5460 *freq = le32_to_cpu(cmd->freq); 5461 5462 return status; 5463 } 5464 5465 /** 5466 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info 5467 * @hw: pointer to the HW struct 5468 * @phy_output: PHY reference clock output pin 5469 * @port_num: Port number 5470 * @flags: PHY flags 5471 * @node_handle: PHY output frequency 5472 * 5473 * Get PHY recovered clock output info (0x0631) 5474 * Return: 0 on success or negative value on failure. 
/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: node handle attached to the requested PHY output
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries to the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
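/*
 * Illustrative sketch only, not part of the driver: the roll-over handling
 * in ice_stat_update40() with concrete numbers. If the previous 40-bit
 * reading was 0xFFFFFFFFF0 and the counter wrapped to 0x10, the delta is
 * (0x10 + 2^40) - 0xFFFFFFFFF0 = 0x20. The helper name is hypothetical.
 */
static u64 __maybe_unused ice_example_delta40(u64 prev, u64 new)
{
	/* counters are masked to 40 bits, exactly as in ice_stat_update40() */
	prev &= BIT_ULL(40) - 1;
	new &= BIT_ULL(40) - 1;

	return new >= prev ? new - prev : (new + BIT_ULL(40)) - prev;
}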
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read data from an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	if (!data)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		/* copy the requested number of bytes out of the response */
		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
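/*
 * Illustrative sketch only, not part of the driver: build the @params byte
 * for ice_aq_read_i2c() with FIELD_PREP() and read four bytes from a device.
 * The zero-initialized topology address, the 0x50 bus address and the zero
 * offset are hypothetical placeholders.
 */
static int __maybe_unused
ice_example_i2c_read4(struct ice_hw *hw, u8 *buf)
{
	struct ice_aqc_link_topo_addr topo_addr = {};
	u8 params;

	/* bits [3:0] of @params carry the number of bytes to read */
	params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 4);

	return ice_aq_read_i2c(hw, topo_addr, 0x50, cpu_to_le16(0), params,
			       buf, NULL);
}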
/**
 * ice_aq_write_i2c - write data to an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *	    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
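/*
 * Illustrative sketch only, not part of the driver: toggle a topology GPIO
 * by reading it with ice_aq_get_gpio() and driving the opposite state with
 * ice_aq_set_gpio(). The helper name is hypothetical; the controller handle
 * and pin index come from the caller.
 */
static int __maybe_unused
ice_example_gpio_toggle(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx)
{
	bool value;
	int err;

	err = ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, &value, NULL);
	if (err)
		return err;

	/* drive the pin to the opposite of its current state */
	return ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, !value, NULL);
}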
/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver - check if the firmware API meets a minimum version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given version
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
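/*
 * Illustrative sketch only, not part of the driver: how the FW API version
 * check above gates a feature. ice_fw_supports_link_override() and
 * ice_get_link_default_override() are the real helpers; the surrounding
 * flow and the helper name are hypothetical.
 */
static int __maybe_unused
ice_example_read_ldo(struct ice_port_info *pi,
		     struct ice_link_default_override_tlv *ldo)
{
	/* older firmware has no link override TLV; skip the NVM read */
	if (!ice_fw_supports_link_override(pi->hw))
		return -EOPNOTSUPP;

	return ice_get_link_default_override(ldo, pi);
}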
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
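/*
 * Illustrative sketch only, not part of the driver: add an LLDP Rx filter
 * for a VSI only when the firmware supports the filter control command.
 * The helper name is hypothetical.
 */
static int __maybe_unused
ice_example_lldp_fltr_add(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}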
/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches the speed of a return value
 * from the list of AQ returned speeds in the range
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15) and would map to BIT(14) in
 * this array. The link_speed reported by the firmware is a 16 bit bitmap,
 * and the array is indexed by [fls(speed) - 1]; any index outside the
 * array (the table ends at BIT(10), 100GB) makes ice_get_link_speed()
 * return 0.
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
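/*
 * Illustrative sketch only, not part of the driver: convert an AQ
 * link_speed bitmap to Mb/s using the [fls(speed) - 1] indexing described
 * above. The helper name is hypothetical; ICE_AQ_LINK_SPEED_25GB is one of
 * the AQ speed bits from ice_adminq_cmd.h.
 */
static u32 __maybe_unused ice_example_aq_speed_to_mbps(u16 link_speed)
{
	if (!link_speed || link_speed == ICE_AQ_LINK_SPEED_UNKNOWN)
		return 0;

	/* e.g. ICE_AQ_LINK_SPEED_25GB = BIT(7), fls() - 1 = 7 -> SPEED_25000 */
	return ice_get_link_speed(fls(link_speed) - 1);
}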